test/: apply clang-format
Change-Id: I1138fbeff5f63beb5c0de2c357793da12502d453
diff --git a/test/acm_random.h b/test/acm_random.h
index b94b6e1..c2f6b0e 100644
--- a/test/acm_random.h
+++ b/test/acm_random.h
@@ -23,9 +23,7 @@
explicit ACMRandom(int seed) : random_(seed) {}
- void Reset(int seed) {
- random_.Reseed(seed);
- }
+ void Reset(int seed) { random_.Reseed(seed); }
uint16_t Rand16(void) {
const uint32_t value =
random_.Generate(testing::internal::Random::kMaxRange);
@@ -52,17 +50,11 @@
return r < 128 ? r << 4 : r >> 4;
}
- int PseudoUniform(int range) {
- return random_.Generate(range);
- }
+ int PseudoUniform(int range) { return random_.Generate(range); }
- int operator()(int n) {
- return PseudoUniform(n);
- }
+ int operator()(int n) { return PseudoUniform(n); }
- static int DeterministicSeed(void) {
- return 0xbaba;
- }
+ static int DeterministicSeed(void) { return 0xbaba; }
private:
testing::internal::Random random_;
diff --git a/test/active_map_refresh_test.cc b/test/active_map_refresh_test.cc
index 577fe33..4e69f52 100644
--- a/test/active_map_refresh_test.cc
+++ b/test/active_map_refresh_test.cc
@@ -17,8 +17,8 @@
namespace {
// Check if any pixel in a 16x16 macroblock varies between frames.
-int CheckMb(const vpx_image_t &current, const vpx_image_t &previous,
- int mb_r, int mb_c) {
+int CheckMb(const vpx_image_t &current, const vpx_image_t &previous, int mb_r,
+ int mb_c) {
for (int plane = 0; plane < 3; plane++) {
int r = 16 * mb_r;
int c0 = 16 * mb_c;
diff --git a/test/active_map_test.cc b/test/active_map_test.cc
index f4b4605..cdf8f0b 100644
--- a/test/active_map_test.cc
+++ b/test/active_map_test.cc
@@ -39,6 +39,7 @@
encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
} else if (video->frame() == 3) {
vpx_active_map_t map = vpx_active_map_t();
+ /* clang-format off */
uint8_t active_map[9 * 13] = {
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
@@ -50,6 +51,7 @@
0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0,
};
+ /* clang-format on */
map.cols = (kWidth + 15) / 16;
map.rows = (kHeight + 15) / 16;
ASSERT_EQ(map.cols, 13u);
@@ -82,15 +84,11 @@
int cpu_used_;
};
-TEST_P(ActiveMapTest, Test) {
- DoTest();
-}
+TEST_P(ActiveMapTest, Test) { DoTest(); }
class ActiveMapTestLarge : public ActiveMapTest {};
-TEST_P(ActiveMapTestLarge, Test) {
- DoTest();
-}
+TEST_P(ActiveMapTestLarge, Test) { DoTest(); }
VP10_INSTANTIATE_TEST_CASE(ActiveMapTestLarge,
::testing::Values(::libvpx_test::kRealTime),
diff --git a/test/add_noise_test.cc b/test/add_noise_test.cc
index 35aaadf..4eff8b9 100644
--- a/test/add_noise_test.cc
+++ b/test/add_noise_test.cc
@@ -24,12 +24,9 @@
char bothclamp[16], unsigned int width,
unsigned int height, int pitch);
-class AddNoiseTest
- : public ::testing::TestWithParam<AddNoiseFunc> {
+class AddNoiseTest : public ::testing::TestWithParam<AddNoiseFunc> {
public:
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
virtual ~AddNoiseTest() {}
};
@@ -45,7 +42,7 @@
DECLARE_ALIGNED(16, char, blackclamp[16]);
DECLARE_ALIGNED(16, char, whiteclamp[16]);
DECLARE_ALIGNED(16, char, bothclamp[16]);
- const int width = 64;
+ const int width = 64;
const int height = 64;
const int image_size = width * height;
char noise[3072];
@@ -105,7 +102,7 @@
DECLARE_ALIGNED(16, char, blackclamp[16]);
DECLARE_ALIGNED(16, char, whiteclamp[16]);
DECLARE_ALIGNED(16, char, bothclamp[16]);
- const int width = 64;
+ const int width = 64;
const int height = 64;
const int image_size = width * height;
char noise[3072];
@@ -128,9 +125,8 @@
ASM_REGISTER_STATE_CHECK(GetParam()(s, noise, blackclamp, whiteclamp,
bothclamp, width, height, width));
srand(0);
- ASM_REGISTER_STATE_CHECK(vpx_plane_add_noise_c(d, noise, blackclamp,
- whiteclamp, bothclamp,
- width, height, width));
+ ASM_REGISTER_STATE_CHECK(vpx_plane_add_noise_c(
+ d, noise, blackclamp, whiteclamp, bothclamp, width, height, width));
for (int i = 0; i < image_size; ++i) {
EXPECT_EQ(static_cast<int>(s[i]), static_cast<int>(d[i])) << "i = " << i;
diff --git a/test/altref_test.cc b/test/altref_test.cc
index 1552488..8c7dee4 100644
--- a/test/altref_test.cc
+++ b/test/altref_test.cc
@@ -19,11 +19,8 @@
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
AltRefForcedKeyTestLarge()
- : EncoderTest(GET_PARAM(0)),
- encoding_mode_(GET_PARAM(1)),
- cpu_used_(GET_PARAM(2)),
- forced_kf_frame_num_(1),
- frame_num_(0) {}
+ : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
+ cpu_used_(GET_PARAM(2)), forced_kf_frame_num_(1), frame_num_(0) {}
virtual ~AltRefForcedKeyTestLarge() {}
virtual void SetUp() {
@@ -38,8 +35,8 @@
if (video->frame() == 0) {
encoder->Control(VP8E_SET_CPUUSED, cpu_used_);
encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
- // override test default for tile columns if necessary.
#if CONFIG_VP10_ENCODER
+ // override test default for tile columns if necessary.
if (GET_PARAM(0) == &libvpx_test::kVP10) {
encoder->Control(VP9E_SET_TILE_COLUMNS, 6);
}
@@ -91,9 +88,8 @@
}
}
-VP10_INSTANTIATE_TEST_CASE(
- AltRefForcedKeyTestLarge,
- ::testing::Values(::libvpx_test::kOnePassGood),
- ::testing::Range(0, 9));
+VP10_INSTANTIATE_TEST_CASE(AltRefForcedKeyTestLarge,
+ ::testing::Values(::libvpx_test::kOnePassGood),
+ ::testing::Range(0, 9));
} // namespace
diff --git a/test/aq_segment_test.cc b/test/aq_segment_test.cc
index ee4b682..396a4fa 100644
--- a/test/aq_segment_test.cc
+++ b/test/aq_segment_test.cc
@@ -49,8 +49,8 @@
cfg_.rc_buf_optimal_sz = 500;
cfg_.rc_buf_sz = 1000;
cfg_.rc_target_bitrate = 300;
- ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv",
- 352, 288, 30, 1, 0, 15);
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352,
+ 288, 30, 1, 0, 15);
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
@@ -60,36 +60,23 @@
// Validate that this AQ segmentation mode (AQ=1, variance_ap)
// encodes and decodes without a mismatch.
-TEST_P(AqSegmentTest, TestNoMisMatchAQ1) {
- DoTest(1);
-}
+TEST_P(AqSegmentTest, TestNoMisMatchAQ1) { DoTest(1); }
// Validate that this AQ segmentation mode (AQ=2, complexity_aq)
// encodes and decodes without a mismatch.
-TEST_P(AqSegmentTest, TestNoMisMatchAQ2) {
- DoTest(2);
-}
+TEST_P(AqSegmentTest, TestNoMisMatchAQ2) { DoTest(2); }
// Validate that this AQ segmentation mode (AQ=3, cyclic_refresh_aq)
// encodes and decodes without a mismatch.
-TEST_P(AqSegmentTest, TestNoMisMatchAQ3) {
- DoTest(3);
-}
+TEST_P(AqSegmentTest, TestNoMisMatchAQ3) { DoTest(3); }
class AqSegmentTestLarge : public AqSegmentTest {};
-TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ1) {
- DoTest(1);
-}
+TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ1) { DoTest(1); }
-TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ2) {
- DoTest(2);
-}
+TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ2) { DoTest(2); }
-TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ3) {
- DoTest(3);
-}
-
+TEST_P(AqSegmentTestLarge, TestNoMisMatchAQ3) { DoTest(3); }
VP10_INSTANTIATE_TEST_CASE(AqSegmentTest,
::testing::Values(::libvpx_test::kRealTime,
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 4f4a75c..9c00ded 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -22,8 +22,8 @@
const unsigned int kFrames = 100;
const int kBitrate = 500;
-#define ARF_NOT_SEEN 1000001
-#define ARF_SEEN_ONCE 1000000
+#define ARF_NOT_SEEN 1000001
+#define ARF_SEEN_ONCE 1000000
typedef struct {
const char *filename;
@@ -44,24 +44,20 @@
const TestVideoParam kTestVectors[] = {
// artificially increase framerate to trigger default check
- {"hantro_collage_w352h288.yuv", 352, 288, 5000, 1,
- 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
- {"hantro_collage_w352h288.yuv", 352, 288, 30, 1,
- 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
- {"rush_hour_444.y4m", 352, 288, 30, 1,
- 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1},
+ { "hantro_collage_w352h288.yuv", 352, 288, 5000, 1, 8, VPX_IMG_FMT_I420,
+ VPX_BITS_8, 0 },
+ { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420,
+ VPX_BITS_8, 0 },
+ { "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
#if CONFIG_VP9_HIGHBITDEPTH
- // Add list of profile 2/3 test videos here ...
+// Add list of profile 2/3 test videos here ...
#endif // CONFIG_VP9_HIGHBITDEPTH
};
const TestEncodeParam kEncodeVectors[] = {
- {::libvpx_test::kOnePassGood, 2},
- {::libvpx_test::kOnePassGood, 5},
- {::libvpx_test::kTwoPassGood, 1},
- {::libvpx_test::kTwoPassGood, 2},
- {::libvpx_test::kTwoPassGood, 5},
- {::libvpx_test::kRealTime, 5},
+ { ::libvpx_test::kOnePassGood, 2 }, { ::libvpx_test::kOnePassGood, 5 },
+ { ::libvpx_test::kTwoPassGood, 1 }, { ::libvpx_test::kTwoPassGood, 2 },
+ { ::libvpx_test::kTwoPassGood, 5 }, { ::libvpx_test::kRealTime, 5 },
};
const int kMinArfVectors[] = {
@@ -80,15 +76,12 @@
class ArfFreqTestLarge
: public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith3Params<TestVideoParam, \
+ public ::libvpx_test::CodecTestWith3Params<TestVideoParam,
TestEncodeParam, int> {
protected:
ArfFreqTestLarge()
- : EncoderTest(GET_PARAM(0)),
- test_video_param_(GET_PARAM(1)),
- test_encode_param_(GET_PARAM(2)),
- min_arf_requested_(GET_PARAM(3)) {
- }
+ : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(1)),
+ test_encode_param_(GET_PARAM(2)), min_arf_requested_(GET_PARAM(3)) {}
virtual ~ArfFreqTestLarge() {}
@@ -114,17 +107,16 @@
}
int GetNumFramesInPkt(const vpx_codec_cx_pkt_t *pkt) {
- const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+ const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
const uint8_t marker = buffer[pkt->data.frame.sz - 1];
const int mag = ((marker >> 3) & 3) + 1;
int frames = (marker & 0x7) + 1;
- const unsigned int index_sz = 2 + mag * frames;
+ const unsigned int index_sz = 2 + mag * frames;
// Check for superframe or not.
// Assume superframe has only one visible frame, the rest being
// invisible. If superframe index is not found, then there is only
// one frame.
- if (!((marker & 0xe0) == 0xc0 &&
- pkt->data.frame.sz >= index_sz &&
+ if (!((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
buffer[pkt->data.frame.sz - index_sz] == marker)) {
frames = 1;
}
@@ -132,8 +124,7 @@
}
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
- return;
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
const int frames = GetNumFramesInPkt(pkt);
if (frames == 1) {
run_of_visible_frames_++;
@@ -167,9 +158,7 @@
}
}
- int GetMinVisibleRun() const {
- return min_run_;
- }
+ int GetMinVisibleRun() const { return min_run_; }
int GetMinArfDistanceRequested() const {
if (min_arf_requested_)
@@ -178,7 +167,7 @@
return vp10_rc_get_default_min_gf_interval(
test_video_param_.width, test_video_param_.height,
(double)test_video_param_.framerate_num /
- test_video_param_.framerate_den);
+ test_video_param_.framerate_den);
}
TestVideoParam test_video_param_;
@@ -197,21 +186,18 @@
cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
cfg_.g_bit_depth = test_video_param_.bit_depth;
init_flags_ = VPX_CODEC_USE_PSNR;
- if (cfg_.g_bit_depth > 8)
- init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+ if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
testing::internal::scoped_ptr<libvpx_test::VideoSource> video;
if (is_extension_y4m(test_video_param_.filename)) {
- video.reset(new libvpx_test::Y4mVideoSource(test_video_param_.filename,
- 0, kFrames));
+ video.reset(new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0,
+ kFrames));
} else {
- video.reset(new libvpx_test::YUVVideoSource(test_video_param_.filename,
- test_video_param_.fmt,
- test_video_param_.width,
- test_video_param_.height,
- test_video_param_.framerate_num,
- test_video_param_.framerate_den,
- 0, kFrames));
+ video.reset(new libvpx_test::YUVVideoSource(
+ test_video_param_.filename, test_video_param_.fmt,
+ test_video_param_.width, test_video_param_.height,
+ test_video_param_.framerate_num, test_video_param_.framerate_den, 0,
+ kFrames));
}
ASSERT_NO_FATAL_FAILURE(RunLoop(video.get()));
@@ -235,15 +221,12 @@
::testing::Combine(
::testing::Values(static_cast<const libvpx_test::CodecFactory *>(
&libvpx_test::kVP10)),
- ::testing::ValuesIn(kTestVectors),
- ::testing::ValuesIn(kEncodeVectors),
+ ::testing::ValuesIn(kTestVectors), ::testing::ValuesIn(kEncodeVectors),
::testing::ValuesIn(kMinArfVectors)));
#endif // CONFIG_VP10_ENCODER
#else
-VP10_INSTANTIATE_TEST_CASE(
- ArfFreqTestLarge,
- ::testing::ValuesIn(kTestVectors),
- ::testing::ValuesIn(kEncodeVectors),
- ::testing::ValuesIn(kMinArfVectors));
+VP10_INSTANTIATE_TEST_CASE(ArfFreqTestLarge, ::testing::ValuesIn(kTestVectors),
+ ::testing::ValuesIn(kEncodeVectors),
+ ::testing::ValuesIn(kMinArfVectors));
#endif // CONFIG_VP9_HIGHBITDEPTH || CONFIG_EXT_REFS
} // namespace
diff --git a/test/avg_test.cc b/test/avg_test.cc
index 44d8dd7..4a5d325 100644
--- a/test/avg_test.cc
+++ b/test/avg_test.cc
@@ -31,7 +31,7 @@
AverageTestBase(int width, int height) : width_(width), height_(height) {}
static void SetUpTestCase() {
- source_data_ = reinterpret_cast<uint8_t*>(
+ source_data_ = reinterpret_cast<uint8_t *>(
vpx_memalign(kDataAlignment, kDataBlockSize));
}
@@ -40,9 +40,7 @@
source_data_ = NULL;
}
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
// Handle blocks up to 4 blocks 64x64 with stride up to 128
@@ -55,47 +53,44 @@
}
// Sum Pixels
- unsigned int ReferenceAverage8x8(const uint8_t* source, int pitch) {
+ unsigned int ReferenceAverage8x8(const uint8_t *source, int pitch) {
unsigned int average = 0;
for (int h = 0; h < 8; ++h)
- for (int w = 0; w < 8; ++w)
- average += source[h * pitch + w];
+ for (int w = 0; w < 8; ++w) average += source[h * pitch + w];
return ((average + 32) >> 6);
}
- unsigned int ReferenceAverage4x4(const uint8_t* source, int pitch) {
+ unsigned int ReferenceAverage4x4(const uint8_t *source, int pitch) {
unsigned int average = 0;
for (int h = 0; h < 4; ++h)
- for (int w = 0; w < 4; ++w)
- average += source[h * pitch + w];
+ for (int w = 0; w < 4; ++w) average += source[h * pitch + w];
return ((average + 8) >> 4);
}
void FillConstant(uint8_t fill_constant) {
for (int i = 0; i < width_ * height_; ++i) {
- source_data_[i] = fill_constant;
+ source_data_[i] = fill_constant;
}
}
void FillRandom() {
for (int i = 0; i < width_ * height_; ++i) {
- source_data_[i] = rnd_.Rand8();
+ source_data_[i] = rnd_.Rand8();
}
}
int width_, height_;
- static uint8_t* source_data_;
+ static uint8_t *source_data_;
int source_stride_;
ACMRandom rnd_;
};
-typedef unsigned int (*AverageFunction)(const uint8_t* s, int pitch);
+typedef unsigned int (*AverageFunction)(const uint8_t *s, int pitch);
typedef std::tr1::tuple<int, int, int, int, AverageFunction> AvgFunc;
-class AverageTest
- : public AverageTestBase,
- public ::testing::WithParamInterface<AvgFunc>{
+class AverageTest : public AverageTestBase,
+ public ::testing::WithParamInterface<AvgFunc> {
public:
AverageTest() : AverageTestBase(GET_PARAM(0), GET_PARAM(1)) {}
@@ -103,17 +98,17 @@
void CheckAverages() {
unsigned int expected = 0;
if (GET_PARAM(3) == 8) {
- expected = ReferenceAverage8x8(source_data_+ GET_PARAM(2),
- source_stride_);
- } else if (GET_PARAM(3) == 4) {
- expected = ReferenceAverage4x4(source_data_+ GET_PARAM(2),
- source_stride_);
+ expected =
+ ReferenceAverage8x8(source_data_ + GET_PARAM(2), source_stride_);
+ } else if (GET_PARAM(3) == 4) {
+ expected =
+ ReferenceAverage4x4(source_data_ + GET_PARAM(2), source_stride_);
}
- ASM_REGISTER_STATE_CHECK(GET_PARAM(4)(source_data_+ GET_PARAM(2),
- source_stride_));
- unsigned int actual = GET_PARAM(4)(source_data_+ GET_PARAM(2),
- source_stride_);
+ ASM_REGISTER_STATE_CHECK(
+ GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_));
+ unsigned int actual =
+ GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_);
EXPECT_EQ(expected, actual);
}
@@ -124,23 +119,20 @@
typedef std::tr1::tuple<int, IntProRowFunc, IntProRowFunc> IntProRowParam;
-class IntProRowTest
- : public AverageTestBase,
- public ::testing::WithParamInterface<IntProRowParam> {
+class IntProRowTest : public AverageTestBase,
+ public ::testing::WithParamInterface<IntProRowParam> {
public:
IntProRowTest()
- : AverageTestBase(16, GET_PARAM(0)),
- hbuf_asm_(NULL),
- hbuf_c_(NULL) {
+ : AverageTestBase(16, GET_PARAM(0)), hbuf_asm_(NULL), hbuf_c_(NULL) {
asm_func_ = GET_PARAM(1);
c_func_ = GET_PARAM(2);
}
protected:
virtual void SetUp() {
- hbuf_asm_ = reinterpret_cast<int16_t*>(
+ hbuf_asm_ = reinterpret_cast<int16_t *>(
vpx_memalign(kDataAlignment, sizeof(*hbuf_asm_) * 16));
- hbuf_c_ = reinterpret_cast<int16_t*>(
+ hbuf_c_ = reinterpret_cast<int16_t *>(
vpx_memalign(kDataAlignment, sizeof(*hbuf_c_) * 16));
}
@@ -169,9 +161,8 @@
typedef std::tr1::tuple<int, IntProColFunc, IntProColFunc> IntProColParam;
-class IntProColTest
- : public AverageTestBase,
- public ::testing::WithParamInterface<IntProColParam> {
+class IntProColTest : public AverageTestBase,
+ public ::testing::WithParamInterface<IntProColParam> {
public:
IntProColTest() : AverageTestBase(GET_PARAM(0), 1), sum_asm_(0), sum_c_(0) {
asm_func_ = GET_PARAM(1);
@@ -195,15 +186,14 @@
typedef int (*SatdFunc)(const int16_t *coeffs, int length);
typedef std::tr1::tuple<int, SatdFunc> SatdTestParam;
-class SatdTest
- : public ::testing::Test,
- public ::testing::WithParamInterface<SatdTestParam> {
+class SatdTest : public ::testing::Test,
+ public ::testing::WithParamInterface<SatdTestParam> {
protected:
virtual void SetUp() {
satd_size_ = GET_PARAM(0);
satd_func_ = GET_PARAM(1);
rnd_.Reset(ACMRandom::DeterministicSeed());
- src_ = reinterpret_cast<int16_t*>(
+ src_ = reinterpret_cast<int16_t *>(
vpx_memalign(16, sizeof(*src_) * satd_size_));
ASSERT_TRUE(src_ != NULL);
}
@@ -235,7 +225,7 @@
ACMRandom rnd_;
};
-uint8_t* AverageTestBase::source_data_ = NULL;
+uint8_t *AverageTestBase::source_data_ = NULL;
TEST_P(AverageTest, MinValue) {
FillConstant(0);
@@ -286,7 +276,6 @@
RunComparison();
}
-
TEST_P(SatdTest, MinValue) {
const int kMin = -32640;
const int expected = -kMin * satd_size_;
@@ -320,92 +309,86 @@
INSTANTIATE_TEST_CASE_P(
C, AverageTest,
- ::testing::Values(
- make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
- make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
+ ::testing::Values(make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
+ make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
-INSTANTIATE_TEST_CASE_P(
- C, SatdTest,
- ::testing::Values(
- make_tuple(16, &vpx_satd_c),
- make_tuple(64, &vpx_satd_c),
- make_tuple(256, &vpx_satd_c),
- make_tuple(1024, &vpx_satd_c)));
+INSTANTIATE_TEST_CASE_P(C, SatdTest,
+ ::testing::Values(make_tuple(16, &vpx_satd_c),
+ make_tuple(64, &vpx_satd_c),
+ make_tuple(256, &vpx_satd_c),
+ make_tuple(1024, &vpx_satd_c)));
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, AverageTest,
- ::testing::Values(
- make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
- make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
- make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
- make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
- make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
- make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
+ ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
+ make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
+ make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
+ make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
+ make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
+ make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
INSTANTIATE_TEST_CASE_P(
- SSE2, IntProRowTest, ::testing::Values(
- make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
- make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
- make_tuple(64, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c)));
+ SSE2, IntProRowTest,
+ ::testing::Values(make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
+ make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
+ make_tuple(64, &vpx_int_pro_row_sse2,
+ &vpx_int_pro_row_c)));
INSTANTIATE_TEST_CASE_P(
- SSE2, IntProColTest, ::testing::Values(
- make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
- make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
- make_tuple(64, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c)));
+ SSE2, IntProColTest,
+ ::testing::Values(make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
+ make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
+ make_tuple(64, &vpx_int_pro_col_sse2,
+ &vpx_int_pro_col_c)));
-INSTANTIATE_TEST_CASE_P(
- SSE2, SatdTest,
- ::testing::Values(
- make_tuple(16, &vpx_satd_sse2),
- make_tuple(64, &vpx_satd_sse2),
- make_tuple(256, &vpx_satd_sse2),
- make_tuple(1024, &vpx_satd_sse2)));
+INSTANTIATE_TEST_CASE_P(SSE2, SatdTest,
+ ::testing::Values(make_tuple(16, &vpx_satd_sse2),
+ make_tuple(64, &vpx_satd_sse2),
+ make_tuple(256, &vpx_satd_sse2),
+ make_tuple(1024, &vpx_satd_sse2)));
#endif
#if HAVE_NEON
INSTANTIATE_TEST_CASE_P(
NEON, AverageTest,
- ::testing::Values(
- make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
- make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
- make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
- make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
- make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
- make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
+ ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
+ make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
+ make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
+ make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
+ make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
+ make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
INSTANTIATE_TEST_CASE_P(
- NEON, IntProRowTest, ::testing::Values(
- make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
- make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
- make_tuple(64, &vpx_int_pro_row_neon, &vpx_int_pro_row_c)));
+ NEON, IntProRowTest,
+ ::testing::Values(make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
+ make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
+ make_tuple(64, &vpx_int_pro_row_neon,
+ &vpx_int_pro_row_c)));
INSTANTIATE_TEST_CASE_P(
- NEON, IntProColTest, ::testing::Values(
- make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
- make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
- make_tuple(64, &vpx_int_pro_col_neon, &vpx_int_pro_col_c)));
+ NEON, IntProColTest,
+ ::testing::Values(make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
+ make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
+ make_tuple(64, &vpx_int_pro_col_neon,
+ &vpx_int_pro_col_c)));
-INSTANTIATE_TEST_CASE_P(
- NEON, SatdTest,
- ::testing::Values(
- make_tuple(16, &vpx_satd_neon),
- make_tuple(64, &vpx_satd_neon),
- make_tuple(256, &vpx_satd_neon),
- make_tuple(1024, &vpx_satd_neon)));
+INSTANTIATE_TEST_CASE_P(NEON, SatdTest,
+ ::testing::Values(make_tuple(16, &vpx_satd_neon),
+ make_tuple(64, &vpx_satd_neon),
+ make_tuple(256, &vpx_satd_neon),
+ make_tuple(1024, &vpx_satd_neon)));
#endif
#if HAVE_MSA
INSTANTIATE_TEST_CASE_P(
MSA, AverageTest,
- ::testing::Values(
- make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
- make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
- make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
- make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
- make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
- make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
+ ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
+ make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
+ make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
+ make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
+ make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
+ make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
#endif
} // namespace
diff --git a/test/blend_a64_mask_1d_test.cc b/test/blend_a64_mask_1d_test.cc
index ba63afa..97608b8 100644
--- a/test/blend_a64_mask_1d_test.cc
+++ b/test/blend_a64_mask_1d_test.cc
@@ -31,7 +31,7 @@
namespace {
-template<typename F, typename T>
+template <typename F, typename T>
class BlendA64Mask1DTest : public FunctionEquivalenceTest<F> {
public:
static const int kIterations = 10000;
@@ -62,30 +62,29 @@
T *p_src1;
switch (this->rng_(3)) {
- case 0: // Separate sources
+ case 0: // Separate sources
p_src0 = src0_;
p_src1 = src1_;
break;
- case 1: // src0 == dst
+ case 1: // src0 == dst
p_src0 = dst_tst_;
src0_stride_ = dst_stride_;
src0_offset_ = dst_offset_;
p_src1 = src1_;
break;
- case 2: // src1 == dst
+ case 2: // src1 == dst
p_src0 = src0_;
p_src1 = dst_tst_;
src1_stride_ = dst_stride_;
src1_offset_ = dst_offset_;
break;
- default:
- FAIL();
+ default: FAIL();
}
Execute(p_src0, p_src1);
- for (int r = 0 ; r < h_ ; ++r) {
- for (int c = 0 ; c < w_ ; ++c) {
+ for (int r = 0; r < h_; ++r) {
+ for (int c = 0; c < w_; ++c) {
ASSERT_EQ(dst_ref_[dst_offset_ + r * dst_stride_ + c],
dst_tst_[dst_offset_ + r * dst_stride_ + c]);
}
@@ -115,28 +114,26 @@
// 8 bit version
//////////////////////////////////////////////////////////////////////////////
-typedef void (*F8B)(uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, int h, int w);
+typedef void (*F8B)(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1,
+ uint32_t src1_stride, const uint8_t *mask, int h, int w);
typedef libvpx_test::FuncParam<F8B> TestFuncs;
class BlendA64Mask1DTest8B : public BlendA64Mask1DTest<F8B, uint8_t> {
protected:
void Execute(const uint8_t *p_src0, const uint8_t *p_src1) {
- params_.ref_func(dst_ref_ + dst_offset_, dst_stride_,
- p_src0 + src0_offset_, src0_stride_,
- p_src1 + src1_offset_, src1_stride_, mask_, h_, w_);
- ASM_REGISTER_STATE_CHECK(
- params_.tst_func(dst_tst_ + dst_offset_, dst_stride_,
- p_src0 + src0_offset_, src0_stride_,
- p_src1 + src1_offset_, src1_stride_, mask_, h_, w_));
+ params_.ref_func(dst_ref_ + dst_offset_, dst_stride_, p_src0 + src0_offset_,
+ src0_stride_, p_src1 + src1_offset_, src1_stride_, mask_,
+ h_, w_);
+ ASM_REGISTER_STATE_CHECK(params_.tst_func(
+ dst_tst_ + dst_offset_, dst_stride_, p_src0 + src0_offset_,
+ src0_stride_, p_src1 + src1_offset_, src1_stride_, mask_, h_, w_));
}
};
TEST_P(BlendA64Mask1DTest8B, RandomValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_.Rand8();
dst_tst_[i] = rng_.Rand8();
@@ -144,7 +141,7 @@
src1_[i] = rng_.Rand8();
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
Common();
@@ -152,70 +149,62 @@
}
TEST_P(BlendA64Mask1DTest8B, ExtremeValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(2) + 254;
dst_tst_[i] = rng_(2) + 254;
src0_[i] = rng_(2) + 254;
src1_[i] = rng_(2) + 254;
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
Common();
}
}
-static void blend_a64_hmask_ref(
- uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, int h, int w) {
+static void blend_a64_hmask_ref(uint8_t *dst, uint32_t dst_stride,
+ const uint8_t *src0, uint32_t src0_stride,
+ const uint8_t *src1, uint32_t src1_stride,
+ const uint8_t *mask, int h, int w) {
uint8_t mask2d[BlendA64Mask1DTest8B::kMaxMaskSize]
[BlendA64Mask1DTest8B::kMaxMaskSize];
- for (int row = 0 ; row < h ; ++row)
- for (int col = 0 ; col < w ; ++col)
- mask2d[row][col] = mask[col];
+ for (int row = 0; row < h; ++row)
+ for (int col = 0; col < w; ++col) mask2d[row][col] = mask[col];
- vpx_blend_a64_mask_c(dst, dst_stride,
- src0, src0_stride,
- src1, src1_stride,
- &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize,
- h, w, 0, 0);
+ vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+ &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize, h, w,
+ 0, 0);
}
-static void blend_a64_vmask_ref(
- uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, int h, int w) {
+static void blend_a64_vmask_ref(uint8_t *dst, uint32_t dst_stride,
+ const uint8_t *src0, uint32_t src0_stride,
+ const uint8_t *src1, uint32_t src1_stride,
+ const uint8_t *mask, int h, int w) {
uint8_t mask2d[BlendA64Mask1DTest8B::kMaxMaskSize]
[BlendA64Mask1DTest8B::kMaxMaskSize];
- for (int row = 0 ; row < h ; ++row)
- for (int col = 0 ; col < w ; ++col)
- mask2d[row][col] = mask[row];
+ for (int row = 0; row < h; ++row)
+ for (int col = 0; col < w; ++col) mask2d[row][col] = mask[row];
- vpx_blend_a64_mask_c(dst, dst_stride,
- src0, src0_stride,
- src1, src1_stride,
- &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize,
- h, w, 0, 0);
+ vpx_blend_a64_mask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
+ &mask2d[0][0], BlendA64Mask1DTest8B::kMaxMaskSize, h, w,
+ 0, 0);
}
INSTANTIATE_TEST_CASE_P(
- C, BlendA64Mask1DTest8B,
- ::testing::Values(TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_c),
- TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_c)));
+ C, BlendA64Mask1DTest8B,
+ ::testing::Values(TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_c),
+ TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_c)));
#if HAVE_SSE4_1
INSTANTIATE_TEST_CASE_P(
- SSE4_1, BlendA64Mask1DTest8B,
- ::testing::Values(
- TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_sse4_1),
- TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_sse4_1)));
+ SSE4_1, BlendA64Mask1DTest8B,
+ ::testing::Values(
+ TestFuncs(blend_a64_hmask_ref, vpx_blend_a64_hmask_sse4_1),
+ TestFuncs(blend_a64_vmask_ref, vpx_blend_a64_vmask_sse4_1)));
#endif // HAVE_SSE4_1
#if CONFIG_VP9_HIGHBITDEPTH
@@ -223,10 +212,10 @@
// High bit-depth version
//////////////////////////////////////////////////////////////////////////////
-typedef void (*FHBD)(uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, int h, int w, int bd);
+typedef void (*FHBD)(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1,
+ uint32_t src1_stride, const uint8_t *mask, int h, int w,
+ int bd);
typedef libvpx_test::FuncParam<FHBD> TestFuncsHBD;
class BlendA64Mask1DTestHBD : public BlendA64Mask1DTest<FHBD, uint16_t> {
@@ -239,37 +228,31 @@
ASM_REGISTER_STATE_CHECK(params_.tst_func(
CONVERT_TO_BYTEPTR(dst_tst_ + dst_offset_), dst_stride_,
CONVERT_TO_BYTEPTR(p_src0 + src0_offset_), src0_stride_,
- CONVERT_TO_BYTEPTR(p_src1 + src1_offset_), src1_stride_,
- mask_, h_, w_, bit_depth_));
+ CONVERT_TO_BYTEPTR(p_src1 + src1_offset_), src1_stride_, mask_, h_, w_,
+ bit_depth_));
}
int bit_depth_;
};
TEST_P(BlendA64Mask1DTestHBD, RandomValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
switch (rng_(3)) {
- case 0:
- bit_depth_ = 8;
- break;
- case 1:
- bit_depth_ = 10;
- break;
- default:
- bit_depth_ = 12;
- break;
+ case 0: bit_depth_ = 8; break;
+ case 1: bit_depth_ = 10; break;
+ default: bit_depth_ = 12; break;
}
const int hi = 1 << bit_depth_;
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(hi);
dst_tst_[i] = rng_(hi);
src0_[i] = rng_(hi);
src1_[i] = rng_(hi);
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
Common();
@@ -277,30 +260,24 @@
}
TEST_P(BlendA64Mask1DTestHBD, ExtremeValues) {
- for (int iter = 0 ; iter < 1000 && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < 1000 && !HasFatalFailure(); ++iter) {
switch (rng_(3)) {
- case 0:
- bit_depth_ = 8;
- break;
- case 1:
- bit_depth_ = 10;
- break;
- default:
- bit_depth_ = 12;
- break;
+ case 0: bit_depth_ = 8; break;
+ case 1: bit_depth_ = 10; break;
+ default: bit_depth_ = 12; break;
}
const int hi = 1 << bit_depth_;
const int lo = hi - 2;
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(hi - lo) + lo;
dst_tst_[i] = rng_(hi - lo) + lo;
src0_[i] = rng_(hi - lo) + lo;
src1_[i] = rng_(hi - lo) + lo;
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
Common();
@@ -308,59 +285,49 @@
}
static void highbd_blend_a64_hmask_ref(
- uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
+ uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
uint8_t mask2d[BlendA64Mask1DTestHBD::kMaxMaskSize]
[BlendA64Mask1DTestHBD::kMaxMaskSize];
- for (int row = 0 ; row < h ; ++row)
- for (int col = 0 ; col < w ; ++col)
- mask2d[row][col] = mask[col];
+ for (int row = 0; row < h; ++row)
+ for (int col = 0; col < w; ++col) mask2d[row][col] = mask[col];
- vpx_highbd_blend_a64_mask_c(dst, dst_stride,
- src0, src0_stride,
- src1, src1_stride,
- &mask2d[0][0],
- BlendA64Mask1DTestHBD::kMaxMaskSize,
- h, w, 0, 0, bd);
+ vpx_highbd_blend_a64_mask_c(
+ dst, dst_stride, src0, src0_stride, src1, src1_stride, &mask2d[0][0],
+ BlendA64Mask1DTestHBD::kMaxMaskSize, h, w, 0, 0, bd);
}
static void highbd_blend_a64_vmask_ref(
- uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
+ uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1, uint32_t src1_stride,
const uint8_t *mask, int h, int w, int bd) {
uint8_t mask2d[BlendA64Mask1DTestHBD::kMaxMaskSize]
[BlendA64Mask1DTestHBD::kMaxMaskSize];
- for (int row = 0 ; row < h ; ++row)
- for (int col = 0 ; col < w ; ++col)
- mask2d[row][col] = mask[row];
+ for (int row = 0; row < h; ++row)
+ for (int col = 0; col < w; ++col) mask2d[row][col] = mask[row];
- vpx_highbd_blend_a64_mask_c(dst, dst_stride,
- src0, src0_stride,
- src1, src1_stride,
- &mask2d[0][0],
- BlendA64Mask1DTestHBD::kMaxMaskSize,
- h, w, 0, 0, bd);
+ vpx_highbd_blend_a64_mask_c(
+ dst, dst_stride, src0, src0_stride, src1, src1_stride, &mask2d[0][0],
+ BlendA64Mask1DTestHBD::kMaxMaskSize, h, w, 0, 0, bd);
}
INSTANTIATE_TEST_CASE_P(
- C, BlendA64Mask1DTestHBD,
- ::testing::Values(
- TestFuncsHBD(highbd_blend_a64_hmask_ref, vpx_highbd_blend_a64_hmask_c),
- TestFuncsHBD(highbd_blend_a64_vmask_ref, vpx_highbd_blend_a64_vmask_c)));
+ C, BlendA64Mask1DTestHBD,
+ ::testing::Values(TestFuncsHBD(highbd_blend_a64_hmask_ref,
+ vpx_highbd_blend_a64_hmask_c),
+ TestFuncsHBD(highbd_blend_a64_vmask_ref,
+ vpx_highbd_blend_a64_vmask_c)));
#if HAVE_SSE4_1
INSTANTIATE_TEST_CASE_P(
- SSE4_1, BlendA64Mask1DTestHBD,
- ::testing::Values(
- TestFuncsHBD(highbd_blend_a64_hmask_ref,
- vpx_highbd_blend_a64_hmask_sse4_1),
- TestFuncsHBD(highbd_blend_a64_vmask_ref,
- vpx_highbd_blend_a64_vmask_sse4_1)));
+ SSE4_1, BlendA64Mask1DTestHBD,
+ ::testing::Values(TestFuncsHBD(highbd_blend_a64_hmask_ref,
+ vpx_highbd_blend_a64_hmask_sse4_1),
+ TestFuncsHBD(highbd_blend_a64_vmask_ref,
+ vpx_highbd_blend_a64_vmask_sse4_1)));
#endif // HAVE_SSE4_1
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/test/blend_a64_mask_test.cc b/test/blend_a64_mask_test.cc
index f6c09bb..ece3bc8 100644
--- a/test/blend_a64_mask_test.cc
+++ b/test/blend_a64_mask_test.cc
@@ -31,7 +31,7 @@
namespace {
-template<typename F, typename T>
+template <typename F, typename T>
class BlendA64MaskTest : public FunctionEquivalenceTest<F> {
protected:
static const int kIterations = 10000;
@@ -61,37 +61,36 @@
src1_offset_ = this->rng_(33);
src1_stride_ = this->rng_(kMaxWidth + 1 - w_) + w_;
- mask_stride_ = this->rng_(kMaxWidth + 1 - w_ * (subx_ ? 2 : 1)) +
- w_ * (subx_ ? 2 : 1);
+ mask_stride_ =
+ this->rng_(kMaxWidth + 1 - w_ * (subx_ ? 2 : 1)) + w_ * (subx_ ? 2 : 1);
T *p_src0;
T *p_src1;
switch (this->rng_(3)) {
- case 0: // Separate sources
+ case 0: // Separate sources
p_src0 = src0_;
p_src1 = src1_;
break;
- case 1: // src0 == dst
+ case 1: // src0 == dst
p_src0 = dst_tst_;
src0_stride_ = dst_stride_;
src0_offset_ = dst_offset_;
p_src1 = src1_;
break;
- case 2: // src1 == dst
+ case 2: // src1 == dst
p_src0 = src0_;
p_src1 = dst_tst_;
src1_stride_ = dst_stride_;
src1_offset_ = dst_offset_;
break;
- default:
- FAIL();
+ default: FAIL();
}
Execute(p_src0, p_src1);
- for (int r = 0 ; r < h_ ; ++r) {
- for (int c = 0 ; c < w_ ; ++c) {
+ for (int r = 0; r < h_; ++r) {
+ for (int c = 0; c < w_; ++c) {
ASSERT_EQ(dst_ref_[dst_offset_ + r * dst_stride_ + c],
dst_tst_[dst_offset_ + r * dst_stride_ + c]);
}
@@ -125,31 +124,28 @@
// 8 bit version
//////////////////////////////////////////////////////////////////////////////
-typedef void (*F8B)(uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, uint32_t mask_stride,
- int h, int w, int suby, int subx);
+typedef void (*F8B)(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1,
+ uint32_t src1_stride, const uint8_t *mask,
+ uint32_t mask_stride, int h, int w, int suby, int subx);
typedef libvpx_test::FuncParam<F8B> TestFuncs;
class BlendA64MaskTest8B : public BlendA64MaskTest<F8B, uint8_t> {
protected:
void Execute(const uint8_t *p_src0, const uint8_t *p_src1) {
- params_.ref_func(dst_ref_ + dst_offset_, dst_stride_,
- p_src0 + src0_offset_, src0_stride_,
- p_src1 + src1_offset_, src1_stride_,
- mask_, kMaxMaskWidth, h_, w_, suby_, subx_);
- ASM_REGISTER_STATE_CHECK(
- params_.tst_func(dst_tst_ + dst_offset_, dst_stride_,
- p_src0 + src0_offset_, src0_stride_,
- p_src1 + src1_offset_, src1_stride_,
- mask_, kMaxMaskWidth, h_, w_, suby_, subx_));
+ params_.ref_func(dst_ref_ + dst_offset_, dst_stride_, p_src0 + src0_offset_,
+ src0_stride_, p_src1 + src1_offset_, src1_stride_, mask_,
+ kMaxMaskWidth, h_, w_, suby_, subx_);
+ ASM_REGISTER_STATE_CHECK(params_.tst_func(
+ dst_tst_ + dst_offset_, dst_stride_, p_src0 + src0_offset_,
+ src0_stride_, p_src1 + src1_offset_, src1_stride_, mask_, kMaxMaskWidth,
+ h_, w_, suby_, subx_));
}
};
TEST_P(BlendA64MaskTest8B, RandomValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_.Rand8();
dst_tst_[i] = rng_.Rand8();
@@ -157,7 +153,7 @@
src1_[i] = rng_.Rand8();
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
Common();
@@ -165,15 +161,15 @@
}
TEST_P(BlendA64MaskTest8B, ExtremeValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(2) + 254;
dst_tst_[i] = rng_(2) + 254;
src0_[i] = rng_(2) + 254;
src1_[i] = rng_(2) + 254;
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
Common();
@@ -181,10 +177,9 @@
}
#if HAVE_SSE4_1
-INSTANTIATE_TEST_CASE_P(
- SSE4_1_C_COMPARE, BlendA64MaskTest8B,
- ::testing::Values(
- TestFuncs(vpx_blend_a64_mask_c, vpx_blend_a64_mask_sse4_1)));
+INSTANTIATE_TEST_CASE_P(SSE4_1_C_COMPARE, BlendA64MaskTest8B,
+ ::testing::Values(TestFuncs(
+ vpx_blend_a64_mask_c, vpx_blend_a64_mask_sse4_1)));
#endif // HAVE_SSE4_1
#if CONFIG_VP9_HIGHBITDEPTH
@@ -192,11 +187,11 @@
// High bit-depth version
//////////////////////////////////////////////////////////////////////////////
-typedef void (*FHBD)(uint8_t *dst, uint32_t dst_stride,
- const uint8_t *src0, uint32_t src0_stride,
- const uint8_t *src1, uint32_t src1_stride,
- const uint8_t *mask, uint32_t mask_stride,
- int h, int w, int suby, int subx, int bd);
+typedef void (*FHBD)(uint8_t *dst, uint32_t dst_stride, const uint8_t *src0,
+ uint32_t src0_stride, const uint8_t *src1,
+ uint32_t src1_stride, const uint8_t *mask,
+ uint32_t mask_stride, int h, int w, int suby, int subx,
+ int bd);
typedef libvpx_test::FuncParam<FHBD> TestFuncsHBD;
class BlendA64MaskTestHBD : public BlendA64MaskTest<FHBD, uint16_t> {
@@ -209,37 +204,31 @@
ASM_REGISTER_STATE_CHECK(params_.tst_func(
CONVERT_TO_BYTEPTR(dst_tst_ + dst_offset_), dst_stride_,
CONVERT_TO_BYTEPTR(p_src0 + src0_offset_), src0_stride_,
- CONVERT_TO_BYTEPTR(p_src1 + src1_offset_), src1_stride_,
- mask_, kMaxMaskWidth, h_, w_, suby_, subx_, bit_depth_));
+ CONVERT_TO_BYTEPTR(p_src1 + src1_offset_), src1_stride_, mask_,
+ kMaxMaskWidth, h_, w_, suby_, subx_, bit_depth_));
}
int bit_depth_;
};
TEST_P(BlendA64MaskTestHBD, RandomValues) {
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
switch (rng_(3)) {
- case 0:
- bit_depth_ = 8;
- break;
- case 1:
- bit_depth_ = 10;
- break;
- default:
- bit_depth_ = 12;
- break;
+ case 0: bit_depth_ = 8; break;
+ case 1: bit_depth_ = 10; break;
+ default: bit_depth_ = 12; break;
}
const int hi = 1 << bit_depth_;
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(hi);
dst_tst_[i] = rng_(hi);
src0_[i] = rng_(hi);
src1_[i] = rng_(hi);
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(VPX_BLEND_A64_MAX_ALPHA + 1);
Common();
@@ -247,30 +236,24 @@
}
TEST_P(BlendA64MaskTestHBD, ExtremeValues) {
- for (int iter = 0 ; iter < 1000 && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < 1000 && !HasFatalFailure(); ++iter) {
switch (rng_(3)) {
- case 0:
- bit_depth_ = 8;
- break;
- case 1:
- bit_depth_ = 10;
- break;
- default:
- bit_depth_ = 12;
- break;
+ case 0: bit_depth_ = 8; break;
+ case 1: bit_depth_ = 10; break;
+ default: bit_depth_ = 12; break;
}
const int hi = 1 << bit_depth_;
const int lo = hi - 2;
- for (int i = 0 ; i < kBufSize ; ++i) {
+ for (int i = 0; i < kBufSize; ++i) {
dst_ref_[i] = rng_(hi - lo) + lo;
dst_tst_[i] = rng_(hi - lo) + lo;
src0_[i] = rng_(hi - lo) + lo;
src1_[i] = rng_(hi - lo) + lo;
}
- for (int i = 0 ; i < kMaxMaskSize ; ++i)
+ for (int i = 0; i < kMaxMaskSize; ++i)
mask_[i] = rng_(2) + VPX_BLEND_A64_MAX_ALPHA - 1;
Common();
@@ -279,10 +262,9 @@
#if HAVE_SSE4_1
INSTANTIATE_TEST_CASE_P(
- SSE4_1_C_COMPARE, BlendA64MaskTestHBD,
- ::testing::Values(
- TestFuncsHBD(vpx_highbd_blend_a64_mask_c,
- vpx_highbd_blend_a64_mask_sse4_1)));
+ SSE4_1_C_COMPARE, BlendA64MaskTestHBD,
+ ::testing::Values(TestFuncsHBD(vpx_highbd_blend_a64_mask_c,
+ vpx_highbd_blend_a64_mask_sse4_1)));
#endif // HAVE_SSE4_1
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index c61bb4a..5dbfd5c 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -28,12 +28,13 @@
TEST(VP9, TestBitIO) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
for (int n = 0; n < num_tests; ++n) {
- for (int method = 0; method <= 7; ++method) { // we generate various proba
+ for (int method = 0; method <= 7; ++method) { // we generate various proba
const int kBitsToTest = 1000;
uint8_t probas[kBitsToTest];
for (int i = 0; i < kBitsToTest; ++i) {
const int parity = i & 1;
+ /* clang-format off */
probas[i] =
(method == 0) ? 0 : (method == 1) ? 255 :
(method == 2) ? 128 :
@@ -44,6 +45,7 @@
(method == 6) ?
(parity ? rnd(64) : 255 - rnd(64)) :
(parity ? rnd(32) : 255 - rnd(32));
+ /* clang-format on */
}
for (int bit_method = 0; bit_method <= 3; ++bit_method) {
const int random_seed = 6432;
@@ -79,8 +81,7 @@
}
GTEST_ASSERT_EQ(vpx_read(&br, probas[i]), bit)
<< "pos: " << i << " / " << kBitsToTest
- << " bit_method: " << bit_method
- << " method: " << method;
+ << " bit_method: " << bit_method << " method: " << method;
}
}
}
diff --git a/test/borders_test.cc b/test/borders_test.cc
index 088b8f2..9057631 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -17,8 +17,9 @@
namespace {
-class BordersTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class BordersTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
BordersTest() : EncoderTest(GET_PARAM(0)) {}
virtual ~BordersTest() {}
@@ -78,6 +79,6 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-VP10_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values(
- ::libvpx_test::kTwoPassGood));
+VP10_INSTANTIATE_TEST_CASE(BordersTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood));
} // namespace
diff --git a/test/clear_system_state.h b/test/clear_system_state.h
index 5e76797..044a5c7 100644
--- a/test/clear_system_state.h
+++ b/test/clear_system_state.h
@@ -12,7 +12,7 @@
#include "./vpx_config.h"
#if ARCH_X86 || ARCH_X86_64
-# include "vpx_ports/x86.h"
+#include "vpx_ports/x86.h"
#endif
namespace libvpx_test {
diff --git a/test/codec_factory.h b/test/codec_factory.h
index e16dbbe..6a8d0c8 100644
--- a/test/codec_factory.h
+++ b/test/codec_factory.h
@@ -32,15 +32,15 @@
virtual ~CodecFactory() {}
- virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
unsigned long deadline) const = 0;
- virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
const vpx_codec_flags_t flags,
unsigned long deadline) // NOLINT(runtime/int)
- const = 0;
+ const = 0;
- virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg,
unsigned long deadline,
const unsigned long init_flags,
TwopassStatsStore *stats) const = 0;
@@ -53,20 +53,20 @@
* to avoid having to include a pointer to the CodecFactory in every test
* definition.
*/
-template<class T1>
-class CodecTestWithParam : public ::testing::TestWithParam<
- std::tr1::tuple< const libvpx_test::CodecFactory*, T1 > > {
-};
+template <class T1>
+class CodecTestWithParam
+ : public ::testing::TestWithParam<
+ std::tr1::tuple<const libvpx_test::CodecFactory *, T1> > {};
-template<class T1, class T2>
-class CodecTestWith2Params : public ::testing::TestWithParam<
- std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2 > > {
-};
+template <class T1, class T2>
+class CodecTestWith2Params
+ : public ::testing::TestWithParam<
+ std::tr1::tuple<const libvpx_test::CodecFactory *, T1, T2> > {};
-template<class T1, class T2, class T3>
-class CodecTestWith3Params : public ::testing::TestWithParam<
- std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2, T3 > > {
-};
+template <class T1, class T2, class T3>
+class CodecTestWith3Params
+ : public ::testing::TestWithParam<
+ std::tr1::tuple<const libvpx_test::CodecFactory *, T1, T2, T3> > {};
/*
* VP10 Codec Definitions
@@ -82,7 +82,7 @@
: Decoder(cfg, flag, deadline) {}
protected:
- virtual vpx_codec_iface_t* CodecInterface() const {
+ virtual vpx_codec_iface_t *CodecInterface() const {
#if CONFIG_VP10_DECODER
return &vpx_codec_vp10_dx_algo;
#else
@@ -98,7 +98,7 @@
: Encoder(cfg, deadline, init_flags, stats) {}
protected:
- virtual vpx_codec_iface_t* CodecInterface() const {
+ virtual vpx_codec_iface_t *CodecInterface() const {
#if CONFIG_VP10_ENCODER
return &vpx_codec_vp10_cx_algo;
#else
@@ -111,12 +111,12 @@
public:
VP10CodecFactory() : CodecFactory() {}
- virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
unsigned long deadline) const {
return CreateDecoder(cfg, 0, deadline);
}
- virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg,
+ virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg,
const vpx_codec_flags_t flags,
unsigned long deadline) const { // NOLINT
#if CONFIG_VP10_DECODER
@@ -126,7 +126,7 @@
#endif
}
- virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg,
+ virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg,
unsigned long deadline,
const unsigned long init_flags,
TwopassStatsStore *stats) const {
@@ -149,11 +149,12 @@
const libvpx_test::VP10CodecFactory kVP10;
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)\
- INSTANTIATE_TEST_CASE_P(VP10, test, \
- ::testing::Combine( \
- ::testing::Values(static_cast<const libvpx_test::CodecFactory*>( \
- &libvpx_test::kVP10)), \
+#define VP10_INSTANTIATE_TEST_CASE(test, ...) \
+ INSTANTIATE_TEST_CASE_P( \
+ VP10, test, \
+ ::testing::Combine( \
+ ::testing::Values(static_cast<const libvpx_test::CodecFactory *>( \
+ &libvpx_test::kVP10)), \
__VA_ARGS__))
#else
#define VP10_INSTANTIATE_TEST_CASE(test, ...)
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index f9bea21..e3e75d1 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -34,14 +34,12 @@
int w, int h);
struct ConvolveFunctions {
- ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg,
- ConvolveFunc h8, ConvolveFunc h8_avg,
- ConvolveFunc v8, ConvolveFunc v8_avg,
- ConvolveFunc hv8, ConvolveFunc hv8_avg,
- ConvolveFunc sh8, ConvolveFunc sh8_avg,
- ConvolveFunc sv8, ConvolveFunc sv8_avg,
- ConvolveFunc shv8, ConvolveFunc shv8_avg,
- int bd)
+ ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, ConvolveFunc h8,
+ ConvolveFunc h8_avg, ConvolveFunc v8, ConvolveFunc v8_avg,
+ ConvolveFunc hv8, ConvolveFunc hv8_avg, ConvolveFunc sh8,
+ ConvolveFunc sh8_avg, ConvolveFunc sv8,
+ ConvolveFunc sv8_avg, ConvolveFunc shv8,
+ ConvolveFunc shv8_avg, int bd)
: copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg),
v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8),
sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg),
@@ -55,69 +53,47 @@
ConvolveFunc h8_avg_;
ConvolveFunc v8_avg_;
ConvolveFunc hv8_avg_;
- ConvolveFunc sh8_; // scaled horiz
- ConvolveFunc sv8_; // scaled vert
- ConvolveFunc shv8_; // scaled horiz/vert
- ConvolveFunc sh8_avg_; // scaled avg horiz
- ConvolveFunc sv8_avg_; // scaled avg vert
- ConvolveFunc shv8_avg_; // scaled avg horiz/vert
+ ConvolveFunc sh8_; // scaled horiz
+ ConvolveFunc sv8_; // scaled vert
+ ConvolveFunc shv8_; // scaled horiz/vert
+ ConvolveFunc sh8_avg_; // scaled avg horiz
+ ConvolveFunc sv8_avg_; // scaled avg vert
+ ConvolveFunc shv8_avg_; // scaled avg horiz/vert
int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth.
};
typedef std::tr1::tuple<int, int, const ConvolveFunctions *> ConvolveParam;
#if CONFIG_VP10 && CONFIG_EXT_PARTITION
-#define ALL_SIZES(convolve_fn) \
- make_tuple(128, 64, &convolve_fn), \
- make_tuple(64, 128, &convolve_fn), \
- make_tuple(128, 128, &convolve_fn), \
- make_tuple(4, 4, &convolve_fn), \
- make_tuple(8, 4, &convolve_fn), \
- make_tuple(4, 8, &convolve_fn), \
- make_tuple(8, 8, &convolve_fn), \
- make_tuple(16, 8, &convolve_fn), \
- make_tuple(8, 16, &convolve_fn), \
- make_tuple(16, 16, &convolve_fn), \
- make_tuple(32, 16, &convolve_fn), \
- make_tuple(16, 32, &convolve_fn), \
- make_tuple(32, 32, &convolve_fn), \
- make_tuple(64, 32, &convolve_fn), \
- make_tuple(32, 64, &convolve_fn), \
- make_tuple(64, 64, &convolve_fn)
+#define ALL_SIZES(convolve_fn) \
+ make_tuple(128, 64, &convolve_fn), make_tuple(64, 128, &convolve_fn), \
+ make_tuple(128, 128, &convolve_fn), make_tuple(4, 4, &convolve_fn), \
+ make_tuple(8, 4, &convolve_fn), make_tuple(4, 8, &convolve_fn), \
+ make_tuple(8, 8, &convolve_fn), make_tuple(16, 8, &convolve_fn), \
+ make_tuple(8, 16, &convolve_fn), make_tuple(16, 16, &convolve_fn), \
+ make_tuple(32, 16, &convolve_fn), make_tuple(16, 32, &convolve_fn), \
+ make_tuple(32, 32, &convolve_fn), make_tuple(64, 32, &convolve_fn), \
+ make_tuple(32, 64, &convolve_fn), make_tuple(64, 64, &convolve_fn)
#else
-#define ALL_SIZES(convolve_fn) \
- make_tuple(4, 4, &convolve_fn), \
- make_tuple(8, 4, &convolve_fn), \
- make_tuple(4, 8, &convolve_fn), \
- make_tuple(8, 8, &convolve_fn), \
- make_tuple(16, 8, &convolve_fn), \
- make_tuple(8, 16, &convolve_fn), \
- make_tuple(16, 16, &convolve_fn), \
- make_tuple(32, 16, &convolve_fn), \
- make_tuple(16, 32, &convolve_fn), \
- make_tuple(32, 32, &convolve_fn), \
- make_tuple(64, 32, &convolve_fn), \
- make_tuple(32, 64, &convolve_fn), \
- make_tuple(64, 64, &convolve_fn)
+#define ALL_SIZES(convolve_fn) \
+ make_tuple(4, 4, &convolve_fn), make_tuple(8, 4, &convolve_fn), \
+ make_tuple(4, 8, &convolve_fn), make_tuple(8, 8, &convolve_fn), \
+ make_tuple(16, 8, &convolve_fn), make_tuple(8, 16, &convolve_fn), \
+ make_tuple(16, 16, &convolve_fn), make_tuple(32, 16, &convolve_fn), \
+ make_tuple(16, 32, &convolve_fn), make_tuple(32, 32, &convolve_fn), \
+ make_tuple(64, 32, &convolve_fn), make_tuple(32, 64, &convolve_fn), \
+ make_tuple(64, 64, &convolve_fn)
#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
// Reference 8-tap subpixel filter, slightly modified to fit into this test.
#define VP9_FILTER_WEIGHT 128
#define VP9_FILTER_SHIFT 7
-uint8_t clip_pixel(int x) {
- return x < 0 ? 0 :
- x > 255 ? 255 :
- x;
-}
+uint8_t clip_pixel(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }
-void filter_block2d_8_c(const uint8_t *src_ptr,
- const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter,
- uint8_t *dst_ptr,
- unsigned int dst_stride,
- unsigned int output_width,
- unsigned int output_height) {
+void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride,
+ const int16_t *HFilter, const int16_t *VFilter,
+ uint8_t *dst_ptr, unsigned int dst_stride,
+ unsigned int output_width, unsigned int output_height) {
// Between passes, we use an intermediate buffer whose height is extended to
// have enough horizontally filtered values as input for the vertical pass.
// This buffer is allocated to be big enough for the largest block type we
@@ -134,7 +110,7 @@
// = 23
// and filter_max_width = 16
//
- uint8_t intermediate_buffer[(kMaxDimension+8) * kMaxDimension];
+ uint8_t intermediate_buffer[(kMaxDimension + 8) * kMaxDimension];
const int intermediate_next_stride =
1 - static_cast<int>(intermediate_height * output_width);
@@ -145,15 +121,11 @@
for (i = 0; i < intermediate_height; ++i) {
for (j = 0; j < output_width; ++j) {
// Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
// Normalize back to 0-255...
*output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
@@ -170,15 +142,11 @@
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
// Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
- (VP9_FILTER_WEIGHT >> 1); // Rounding
+ const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
+ (VP9_FILTER_WEIGHT >> 1); // Rounding
// Normalize back to 0-255...
*dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
@@ -189,12 +157,9 @@
}
}
-void block2d_average_c(uint8_t *src,
- unsigned int src_stride,
- uint8_t *output_ptr,
- unsigned int output_stride,
- unsigned int output_width,
- unsigned int output_height) {
+void block2d_average_c(uint8_t *src, unsigned int src_stride,
+ uint8_t *output_ptr, unsigned int output_stride,
+ unsigned int output_width, unsigned int output_height) {
unsigned int i, j;
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
@@ -206,10 +171,8 @@
void filter_average_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter,
- uint8_t *dst_ptr,
- unsigned int dst_stride,
+ const int16_t *HFilter, const int16_t *VFilter,
+ uint8_t *dst_ptr, unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
uint8_t tmp[kMaxDimension * kMaxDimension];
@@ -218,20 +181,17 @@
assert(output_height <= kMaxDimension);
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, kMaxDimension,
output_width, output_height);
- block2d_average_c(tmp, kMaxDimension, dst_ptr, dst_stride,
- output_width, output_height);
+ block2d_average_c(tmp, kMaxDimension, dst_ptr, dst_stride, output_width,
+ output_height);
}
#if CONFIG_VP9_HIGHBITDEPTH
void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter,
- uint16_t *dst_ptr,
- unsigned int dst_stride,
+ const int16_t *HFilter, const int16_t *VFilter,
+ uint16_t *dst_ptr, unsigned int dst_stride,
unsigned int output_width,
- unsigned int output_height,
- int bd) {
+ unsigned int output_height, int bd) {
// Between passes, we use an intermediate buffer whose height is extended to
// have enough horizontally filtered values as input for the vertical pass.
// This buffer is allocated to be big enough for the largest block type we
@@ -247,7 +207,7 @@
* = 23
* and filter_max_width = 16
*/
- uint16_t intermediate_buffer[(kMaxDimension+8) * kMaxDimension];
+ uint16_t intermediate_buffer[(kMaxDimension + 8) * kMaxDimension];
const int intermediate_next_stride =
1 - static_cast<int>(intermediate_height * output_width);
@@ -260,14 +220,10 @@
for (i = 0; i < intermediate_height; ++i) {
for (j = 0; j < output_width; ++j) {
// Apply filter...
- const int temp = (src_ptr[0] * HFilter[0]) +
- (src_ptr[1] * HFilter[1]) +
- (src_ptr[2] * HFilter[2]) +
- (src_ptr[3] * HFilter[3]) +
- (src_ptr[4] * HFilter[4]) +
- (src_ptr[5] * HFilter[5]) +
- (src_ptr[6] * HFilter[6]) +
- (src_ptr[7] * HFilter[7]) +
+ const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) +
+ (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) +
+ (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) +
+ (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) +
(VP9_FILTER_WEIGHT >> 1); // Rounding
// Normalize back to 0-255...
@@ -288,14 +244,10 @@
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
// Apply filter...
- const int temp = (src_ptr[0] * VFilter[0]) +
- (src_ptr[1] * VFilter[1]) +
- (src_ptr[2] * VFilter[2]) +
- (src_ptr[3] * VFilter[3]) +
- (src_ptr[4] * VFilter[4]) +
- (src_ptr[5] * VFilter[5]) +
- (src_ptr[6] * VFilter[6]) +
- (src_ptr[7] * VFilter[7]) +
+ const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) +
+ (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) +
+ (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) +
+ (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) +
(VP9_FILTER_WEIGHT >> 1); // Rounding
// Normalize back to 0-255...
@@ -308,10 +260,8 @@
}
}
-void highbd_block2d_average_c(uint16_t *src,
- unsigned int src_stride,
- uint16_t *output_ptr,
- unsigned int output_stride,
+void highbd_block2d_average_c(uint16_t *src, unsigned int src_stride,
+ uint16_t *output_ptr, unsigned int output_stride,
unsigned int output_width,
unsigned int output_height) {
unsigned int i, j;
@@ -323,22 +273,17 @@
}
}
-void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr,
- const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter,
- uint16_t *dst_ptr,
- unsigned int dst_stride,
- unsigned int output_width,
- unsigned int output_height,
- int bd) {
+void highbd_filter_average_block2d_8_c(
+ const uint16_t *src_ptr, const unsigned int src_stride,
+ const int16_t *HFilter, const int16_t *VFilter, uint16_t *dst_ptr,
+ unsigned int dst_stride, unsigned int output_width,
+ unsigned int output_height, int bd) {
uint16_t tmp[kMaxDimension * kMaxDimension];
assert(output_width <= kMaxDimension);
assert(output_height <= kMaxDimension);
- highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
- tmp, kMaxDimension,
- output_width, output_height, bd);
+ highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp,
+ kMaxDimension, output_width, output_height, bd);
highbd_block2d_average_c(tmp, kMaxDimension, dst_ptr, dst_stride,
output_width, output_height);
}
@@ -348,19 +293,20 @@
public:
static void SetUpTestCase() {
// Force input_ to be unaligned, output to be 16 byte aligned.
- input_ = reinterpret_cast<uint8_t*>(
- vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1;
- output_ = reinterpret_cast<uint8_t*>(
+ input_ = reinterpret_cast<uint8_t *>(
+ vpx_memalign(kDataAlignment, kInputBufferSize + 1)) +
+ 1;
+ output_ = reinterpret_cast<uint8_t *>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
- output_ref_ = reinterpret_cast<uint8_t*>(
+ output_ref_ = reinterpret_cast<uint8_t *>(
vpx_memalign(kDataAlignment, kOutputBufferSize));
#if CONFIG_VP9_HIGHBITDEPTH
- input16_ = reinterpret_cast<uint16_t*>(
- vpx_memalign(kDataAlignment,
- (kInputBufferSize + 1) * sizeof(uint16_t))) + 1;
- output16_ = reinterpret_cast<uint16_t*>(
+ input16_ = reinterpret_cast<uint16_t *>(vpx_memalign(
+ kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
+ 1;
+ output16_ = reinterpret_cast<uint16_t *>(
vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
- output16_ref_ = reinterpret_cast<uint16_t*>(
+ output16_ref_ = reinterpret_cast<uint16_t *>(
vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
#endif
}
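
The "+ 1" in SetUpTestCase above is a deliberate misalignment trick: allocate one spare byte from an aligned allocator, then step off the boundary so the unaligned-load paths of the SIMD kernels get exercised. A standalone sketch of the same idea using plain new[] instead of vpx_memalign (all names below are illustrative):

#include <cstddef>
#include <cstdint>

int main() {
  const size_t kSize = 64 * 64;
  uint8_t *base = new uint8_t[kSize + 16];
  // Round up to a 16-byte boundary, then step off it by one byte so the
  // pointer is guaranteed to be misaligned.
  uintptr_t aligned = (reinterpret_cast<uintptr_t>(base) + 15) & ~uintptr_t(15);
  uint8_t *unaligned = reinterpret_cast<uint8_t *>(aligned) + 1;
  (void)unaligned;  // ... use 'unaligned' as the convolution source ...
  delete[] base;    // always free the original pointer, not the offset one
  return 0;
}
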
@@ -386,7 +332,7 @@
protected:
static const int kDataAlignment = 16;
- static const int kOuterBlockSize = 4*kMaxDimension;
+ static const int kOuterBlockSize = 4 * kMaxDimension;
static const int kInputStride = kOuterBlockSize;
static const int kOutputStride = kOuterBlockSize;
static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
@@ -456,8 +402,7 @@
void CheckGuardBlocks() {
for (int i = 0; i < kOutputBufferSize; ++i) {
- if (IsIndexInBorder(i))
- EXPECT_EQ(255, output_[i]);
+ if (IsIndexInBorder(i)) EXPECT_EQ(255, output_[i]);
}
}
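
CheckGuardBlocks relies on a guard band: the output buffer is larger than the block, the border is pre-filled with the sentinel 255, and a kernel that writes outside the block trips the EXPECT_EQ above. A hedged sketch of that pattern (BorderUntouched and its parameters are made up for this illustration, not taken from the test):

#include <cstdint>

// Assumes stride >= w + 2 * border.
bool BorderUntouched(const uint8_t *buf, int stride, int w, int h, int border) {
  const int full_w = w + 2 * border, full_h = h + 2 * border;
  for (int y = 0; y < full_h; ++y) {
    for (int x = 0; x < full_w; ++x) {
      const bool in_block =
          y >= border && y < border + h && x >= border && x < border + w;
      if (!in_block && buf[y * stride + x] != 255) return false;  // overwritten
    }
  }
  return true;
}
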
@@ -515,98 +460,88 @@
void assign_val(uint8_t *list, int index, uint16_t val) const {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
- list[index] = (uint8_t) val;
+ list[index] = (uint8_t)val;
} else {
CONVERT_TO_SHORTPTR(list)[index] = val;
}
#else
- list[index] = (uint8_t) val;
+ list[index] = (uint8_t)val;
#endif
}
- void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr,
- const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter,
- uint8_t *dst_ptr,
- unsigned int dst_stride,
- unsigned int output_width,
- unsigned int output_height) {
+ void wrapper_filter_average_block2d_8_c(
+ const uint8_t *src_ptr, const unsigned int src_stride,
+ const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
+ unsigned int dst_stride, unsigned int output_width,
+ unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
- filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
- dst_ptr, dst_stride, output_width,
- output_height);
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+ dst_stride, output_width, output_height);
} else {
- highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr),
- src_stride, HFilter, VFilter,
- CONVERT_TO_SHORTPTR(dst_ptr),
- dst_stride, output_width, output_height,
- UUT_->use_highbd_);
+ highbd_filter_average_block2d_8_c(
+ CONVERT_TO_SHORTPTR(src_ptr), src_stride, HFilter, VFilter,
+ CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
}
#else
- filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
- dst_ptr, dst_stride, output_width,
- output_height);
+ filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+ dst_stride, output_width, output_height);
#endif
}
void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
const unsigned int src_stride,
const int16_t *HFilter,
- const int16_t *VFilter,
- uint8_t *dst_ptr,
+ const int16_t *VFilter, uint8_t *dst_ptr,
unsigned int dst_stride,
unsigned int output_width,
unsigned int output_height) {
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
- filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
- dst_ptr, dst_stride, output_width, output_height);
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+ dst_stride, output_width, output_height);
} else {
highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride,
- HFilter, VFilter,
- CONVERT_TO_SHORTPTR(dst_ptr), dst_stride,
- output_width, output_height, UUT_->use_highbd_);
+ HFilter, VFilter, CONVERT_TO_SHORTPTR(dst_ptr),
+ dst_stride, output_width, output_height,
+ UUT_->use_highbd_);
}
#else
- filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter,
- dst_ptr, dst_stride, output_width, output_height);
+ filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
+ dst_stride, output_width, output_height);
#endif
}
- const ConvolveFunctions* UUT_;
- static uint8_t* input_;
- static uint8_t* output_;
- static uint8_t* output_ref_;
+ const ConvolveFunctions *UUT_;
+ static uint8_t *input_;
+ static uint8_t *output_;
+ static uint8_t *output_ref_;
#if CONFIG_VP9_HIGHBITDEPTH
- static uint16_t* input16_;
- static uint16_t* output16_;
- static uint16_t* output16_ref_;
+ static uint16_t *input16_;
+ static uint16_t *output16_;
+ static uint16_t *output16_ref_;
int mask_;
#endif
};
-uint8_t* ConvolveTest::input_ = NULL;
-uint8_t* ConvolveTest::output_ = NULL;
-uint8_t* ConvolveTest::output_ref_ = NULL;
+uint8_t *ConvolveTest::input_ = NULL;
+uint8_t *ConvolveTest::output_ = NULL;
+uint8_t *ConvolveTest::output_ref_ = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
-uint16_t* ConvolveTest::input16_ = NULL;
-uint16_t* ConvolveTest::output16_ = NULL;
-uint16_t* ConvolveTest::output16_ref_ = NULL;
+uint16_t *ConvolveTest::input16_ = NULL;
+uint16_t *ConvolveTest::output16_ = NULL;
+uint16_t *ConvolveTest::output16_ref_ = NULL;
#endif
-TEST_P(ConvolveTest, GuardBlocks) {
- CheckGuardBlocks();
-}
+TEST_P(ConvolveTest, GuardBlocks) { CheckGuardBlocks(); }
TEST_P(ConvolveTest, Copy) {
- uint8_t* const in = input();
- uint8_t* const out = output();
+ uint8_t *const in = input();
+ uint8_t *const out = output();
- ASM_REGISTER_STATE_CHECK(
- UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->copy_(in, kInputStride, out, kOutputStride,
+ NULL, 0, NULL, 0, Width(), Height()));
CheckGuardBlocks();
@@ -618,14 +553,13 @@
}
TEST_P(ConvolveTest, Avg) {
- uint8_t* const in = input();
- uint8_t* const out = output();
- uint8_t* const out_ref = output_ref();
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+ uint8_t *const out_ref = output_ref();
CopyOutputToRef();
- ASM_REGISTER_STATE_CHECK(
- UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->avg_(in, kInputStride, out, kOutputStride,
+ NULL, 0, NULL, 0, Width(), Height()));
CheckGuardBlocks();
@@ -633,18 +567,20 @@
for (int x = 0; x < Width(); ++x)
ASSERT_EQ(lookup(out, y * kOutputStride + x),
ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
- lookup(out_ref, y * kOutputStride + x), 1))
+ lookup(out_ref, y * kOutputStride + x),
+ 1))
<< "(" << x << "," << y << ")";
}
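
The Avg check above compares against ROUND_POWER_OF_TWO of the pixel sum with a shift of 1, i.e. a round-to-nearest average of the new prediction and the existing destination pixel. As a one-line sketch (RoundedAvg is an illustrative name, not the test's macro):

#include <cstdint>

static inline uint8_t RoundedAvg(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((a + b + 1) >> 1);  // (a + b) / 2, rounded up on ties
}
// e.g. RoundedAvg(3, 4) == 4, RoundedAvg(3, 3) == 3
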
TEST_P(ConvolveTest, CopyHoriz) {
- uint8_t* const in = input();
- uint8_t* const out = output();
- DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+ DECLARE_ALIGNED(256, const int16_t,
+ filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
- ASM_REGISTER_STATE_CHECK(
- UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->sh8_(in, kInputStride, out, kOutputStride,
+ filter8, 16, filter8, 16, Width(),
+ Height()));
CheckGuardBlocks();
@@ -656,13 +592,14 @@
}
TEST_P(ConvolveTest, CopyVert) {
- uint8_t* const in = input();
- uint8_t* const out = output();
- DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+ DECLARE_ALIGNED(256, const int16_t,
+ filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
- ASM_REGISTER_STATE_CHECK(
- UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->sv8_(in, kInputStride, out, kOutputStride,
+ filter8, 16, filter8, 16, Width(),
+ Height()));
CheckGuardBlocks();
@@ -674,13 +611,14 @@
}
TEST_P(ConvolveTest, Copy2D) {
- uint8_t* const in = input();
- uint8_t* const out = output();
- DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0};
+ uint8_t *const in = input();
+ uint8_t *const out = output();
+ DECLARE_ALIGNED(256, const int16_t,
+ filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 };
- ASM_REGISTER_STATE_CHECK(
- UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8,
- 16, Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
+ filter8, 16, filter8, 16, Width(),
+ Height()));
CheckGuardBlocks();
@@ -718,12 +656,12 @@
const int16_t kInvalidFilter[8] = { 0 };
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
- uint8_t* const in = input();
- uint8_t* const out = output();
+ uint8_t *const in = input();
+ uint8_t *const out = output();
#if CONFIG_VP9_HIGHBITDEPTH
uint8_t ref8[kOutputStride * kMaxDimension];
uint16_t ref16[kOutputStride * kMaxDimension];
- uint8_t* ref;
+ uint8_t *ref;
if (UUT_->use_highbd_ == 0) {
ref = ref8;
} else {
@@ -739,31 +677,26 @@
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- wrapper_filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
+ wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
+ filters[filter_y], ref, kOutputStride,
Width(), Height());
if (filter_x && filter_y)
- ASM_REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->hv8_(
+ in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+ filters[filter_y], 16, Width(), Height()));
else if (filter_y)
ASM_REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 16, filters[filter_y], 16,
- Width(), Height()));
+ UUT_->v8_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+ 16, filters[filter_y], 16, Width(), Height()));
else if (filter_x)
ASM_REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, kInvalidFilter, 16,
- Width(), Height()));
+ UUT_->h8_(in, kInputStride, out, kOutputStride, filters[filter_x],
+ 16, kInvalidFilter, 16, Width(), Height()));
else
ASM_REGISTER_STATE_CHECK(
- UUT_->copy_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 0, kInvalidFilter, 0,
- Width(), Height()));
+ UUT_->copy_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+ 0, kInvalidFilter, 0, Width(), Height()));
CheckGuardBlocks();
@@ -772,20 +705,20 @@
ASSERT_EQ(lookup(ref, y * kOutputStride + x),
lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
- << "filters (" << filter_bank << ","
- << filter_x << "," << filter_y << ")";
+ << "filters (" << filter_bank << "," << filter_x << ","
+ << filter_y << ")";
}
}
}
}
TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
- uint8_t* const in = input();
- uint8_t* const out = output();
+ uint8_t *const in = input();
+ uint8_t *const out = output();
#if CONFIG_VP9_HIGHBITDEPTH
uint8_t ref8[kOutputStride * kMaxDimension];
uint16_t ref16[kOutputStride * kMaxDimension];
- uint8_t* ref;
+ uint8_t *ref;
if (UUT_->use_highbd_ == 0) {
ref = ref8;
} else {
@@ -821,31 +754,26 @@
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- wrapper_filter_average_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
- Width(), Height());
+ wrapper_filter_average_block2d_8_c(in, kInputStride, filters[filter_x],
+ filters[filter_y], ref,
+ kOutputStride, Width(), Height());
if (filter_x && filter_y)
- ASM_REGISTER_STATE_CHECK(
- UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->hv8_avg_(
+ in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+ filters[filter_y], 16, Width(), Height()));
else if (filter_y)
- ASM_REGISTER_STATE_CHECK(
- UUT_->v8_avg_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->v8_avg_(
+ in, kInputStride, out, kOutputStride, kInvalidFilter, 16,
+ filters[filter_y], 16, Width(), Height()));
else if (filter_x)
- ASM_REGISTER_STATE_CHECK(
- UUT_->h8_avg_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, kInvalidFilter, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->h8_avg_(
+ in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+ kInvalidFilter, 16, Width(), Height()));
else
ASM_REGISTER_STATE_CHECK(
- UUT_->avg_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 0, kInvalidFilter, 0,
- Width(), Height()));
+ UUT_->avg_(in, kInputStride, out, kOutputStride, kInvalidFilter,
+ 0, kInvalidFilter, 0, Width(), Height()));
CheckGuardBlocks();
@@ -854,8 +782,8 @@
ASSERT_EQ(lookup(ref, y * kOutputStride + x),
lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
- << "filters (" << filter_bank << ","
- << filter_x << "," << filter_y << ")";
+ << "filters (" << filter_bank << "," << filter_x << ","
+ << filter_y << ")";
}
}
}
@@ -902,16 +830,16 @@
for (int y = 0; y < 8; ++y) {
for (int x = 0; x < 8; ++x) {
#if CONFIG_VP9_HIGHBITDEPTH
- assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
- ((seed_val >> (axis ? y : x)) & 1) * mask_);
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * mask_);
#else
- assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
- ((seed_val >> (axis ? y : x)) & 1) * 255);
+ assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
+ ((seed_val >> (axis ? y : x)) & 1) * 255);
#endif
if (axis) seed_val++;
}
if (axis)
- seed_val-= 8;
+ seed_val -= 8;
else
seed_val++;
}
@@ -922,38 +850,33 @@
vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
- wrapper_filter_block2d_8_c(in, kInputStride,
- filters[filter_x], filters[filter_y],
- ref, kOutputStride,
+ wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
+ filters[filter_y], ref, kOutputStride,
Width(), Height());
if (filter_x && filter_y)
- ASM_REGISTER_STATE_CHECK(
- UUT_->hv8_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->hv8_(
+ in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+ filters[filter_y], 16, Width(), Height()));
else if (filter_y)
- ASM_REGISTER_STATE_CHECK(
- UUT_->v8_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 16, filters[filter_y], 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->v8_(
+ in, kInputStride, out, kOutputStride, kInvalidFilter, 16,
+ filters[filter_y], 16, Width(), Height()));
else if (filter_x)
- ASM_REGISTER_STATE_CHECK(
- UUT_->h8_(in, kInputStride, out, kOutputStride,
- filters[filter_x], 16, kInvalidFilter, 16,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->h8_(
+ in, kInputStride, out, kOutputStride, filters[filter_x], 16,
+ kInvalidFilter, 16, Width(), Height()));
else
- ASM_REGISTER_STATE_CHECK(
- UUT_->copy_(in, kInputStride, out, kOutputStride,
- kInvalidFilter, 0, kInvalidFilter, 0,
- Width(), Height()));
+ ASM_REGISTER_STATE_CHECK(UUT_->copy_(
+ in, kInputStride, out, kOutputStride, kInvalidFilter, 0,
+ kInvalidFilter, 0, Width(), Height()));
for (int y = 0; y < Height(); ++y)
for (int x = 0; x < Width(); ++x)
ASSERT_EQ(lookup(ref, y * kOutputStride + x),
lookup(out, y * kOutputStride + x))
<< "mismatch at (" << x << "," << y << "), "
- << "filters (" << filter_bank << ","
- << filter_x << "," << filter_y << ")";
+ << "filters (" << filter_bank << "," << filter_x << ","
+ << filter_y << ")";
}
}
}
@@ -964,8 +887,8 @@
/* This test exercises that enough rows and columns are filtered with all
   possible initial fractional positions and scaling steps. */

TEST_P(ConvolveTest, CheckScalingFiltering) {
- uint8_t* const in = input();
- uint8_t* const out = output();
+ uint8_t *const in = input();
+ uint8_t *const out = output();
const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
SetConstantInput(127);
@@ -974,9 +897,8 @@
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride,
- eighttap[frac], step,
- eighttap[frac], step,
- Width(), Height()));
+ eighttap[frac], step, eighttap[frac],
+ step, Width(), Height()));
CheckGuardBlocks();
@@ -984,8 +906,8 @@
for (int x = 0; x < Width(); ++x) {
ASSERT_EQ(lookup(in, y * kInputStride + x),
lookup(out, y * kOutputStride + x))
- << "x == " << x << ", y == " << y
- << ", frac == " << frac << ", step == " << step;
+ << "x == " << x << ", y == " << y << ", frac == " << frac
+ << ", step == " << step;
}
}
}
@@ -995,18 +917,14 @@
using std::tr1::make_tuple;
#if CONFIG_VP9_HIGHBITDEPTH
-#define WRAP(func, bd) \
-void wrap_ ## func ## _ ## bd(const uint8_t *src, ptrdiff_t src_stride, \
- uint8_t *dst, ptrdiff_t dst_stride, \
- const int16_t *filter_x, \
- int filter_x_stride, \
- const int16_t *filter_y, \
- int filter_y_stride, \
- int w, int h) { \
- vpx_highbd_ ## func(src, src_stride, dst, dst_stride, filter_x, \
- filter_x_stride, filter_y, filter_y_stride, \
- w, h, bd); \
-}
+#define WRAP(func, bd) \
+ void wrap_##func##_##bd( \
+ const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
+ ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, \
+ const int16_t *filter_y, int filter_y_stride, int w, int h) { \
+ vpx_highbd_##func(src, src_stride, dst, dst_stride, filter_x, \
+ filter_x_stride, filter_y, filter_y_stride, w, h, bd); \
+ }
#if HAVE_SSE2 && ARCH_X86_64
WRAP(convolve_copy_sse2, 8)
WRAP(convolve_avg_sse2, 8)
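
The WRAP macro above adapts the high-bit-depth functions, which take an extra bit-depth argument, to the common convolve signature by baking that argument into a generated wrapper whose name encodes it. A stripped-down sketch of the same token-pasting technique (the names and the example function below are made up, not libvpx API):

#include <cstdio>

static void scale_and_print(int value, int shift) { printf("%d\n", value << shift); }

#define MAKE_FIXED_SHIFT(name, shift)            \
  static void name##_shift_##shift(int value) {  \
    /* the trailing argument is baked in here */ \
    scale_and_print(value, shift);               \
  }

MAKE_FIXED_SHIFT(scale, 2)  // defines scale_shift_2(int)
MAKE_FIXED_SHIFT(scale, 4)  // defines scale_shift_4(int)

int main() {
  scale_shift_2(3);  // prints 12
  scale_shift_4(3);  // prints 48
  return 0;
}
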
@@ -1061,48 +979,40 @@
#undef WRAP
const ConvolveFunctions convolve8_c(
- wrap_convolve_copy_c_8, wrap_convolve_avg_c_8,
+ wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, wrap_convolve8_horiz_c_8,
+ wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8,
+ wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
- wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
- wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
- wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
- wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8,
- wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
+ wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8,
+ wrap_convolve8_avg_c_8, 8);
const ConvolveFunctions convolve10_c(
- wrap_convolve_copy_c_10, wrap_convolve_avg_c_10,
+ wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, wrap_convolve8_horiz_c_10,
+ wrap_convolve8_avg_horiz_c_10, wrap_convolve8_vert_c_10,
+ wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
- wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
- wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
- wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
- wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10,
- wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10);
+ wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10,
+ wrap_convolve8_avg_c_10, 10);
const ConvolveFunctions convolve12_c(
- wrap_convolve_copy_c_12, wrap_convolve_avg_c_12,
+ wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, wrap_convolve8_horiz_c_12,
+ wrap_convolve8_avg_horiz_c_12, wrap_convolve8_vert_c_12,
+ wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
- wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
- wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
- wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
- wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12,
- wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 12);
+ wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12,
+ wrap_convolve8_avg_c_12, 12);
const ConvolveParam kArrayConvolve_c[] = {
- ALL_SIZES(convolve8_c),
- ALL_SIZES(convolve10_c),
- ALL_SIZES(convolve12_c)
+ ALL_SIZES(convolve8_c), ALL_SIZES(convolve10_c), ALL_SIZES(convolve12_c)
};
#else
const ConvolveFunctions convolve8_c(
- vpx_convolve_copy_c, vpx_convolve_avg_c,
- vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c,
- vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
- vpx_convolve8_c, vpx_convolve8_avg_c,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
+ vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_c,
+ vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c,
+ vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve_c[] = { ALL_SIZES(convolve8_c) };
#endif
-INSTANTIATE_TEST_CASE_P(C, ConvolveTest,
- ::testing::ValuesIn(kArrayConvolve_c));
+INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::ValuesIn(kArrayConvolve_c));
#if HAVE_SSE2 && ARCH_X86_64
#if CONFIG_VP9_HIGHBITDEPTH
@@ -1130,20 +1040,16 @@
wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
-const ConvolveParam kArrayConvolve_sse2[] = {
- ALL_SIZES(convolve8_sse2),
- ALL_SIZES(convolve10_sse2),
- ALL_SIZES(convolve12_sse2)
-};
+const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2),
+ ALL_SIZES(convolve10_sse2),
+ ALL_SIZES(convolve12_sse2) };
#else
const ConvolveFunctions convolve8_sse2(
- vpx_convolve_copy_sse2, vpx_convolve_avg_sse2,
- vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2,
- vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2,
- vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_sse2, vpx_convolve_avg_sse2, vpx_convolve8_horiz_sse2,
+ vpx_convolve8_avg_horiz_sse2, vpx_convolve8_vert_sse2,
+ vpx_convolve8_avg_vert_sse2, vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2) };
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -1153,13 +1059,11 @@
#if HAVE_SSSE3
const ConvolveFunctions convolve8_ssse3(
- vpx_convolve_copy_c, vpx_convolve_avg_c,
- vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3,
- vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3,
- vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_ssse3, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_ssse3,
+ vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_ssse3,
+ vpx_convolve8_avg_vert_ssse3, vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_ssse3, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve8_ssse3[] = { ALL_SIZES(convolve8_ssse3) };
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest,
@@ -1168,13 +1072,11 @@
#if HAVE_AVX2 && HAVE_SSSE3
const ConvolveFunctions convolve8_avx2(
- vpx_convolve_copy_c, vpx_convolve_avg_c,
- vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3,
- vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3,
- vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_avx2,
+ vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_avx2,
+ vpx_convolve8_avg_vert_ssse3, vpx_convolve8_avx2, vpx_convolve8_avg_ssse3,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES(convolve8_avx2) };
INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest,
@@ -1185,22 +1087,18 @@
#if HAVE_NEON && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
#if HAVE_NEON_ASM
const ConvolveFunctions convolve8_neon(
- vpx_convolve_copy_neon, vpx_convolve_avg_neon,
- vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
- vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
- vpx_convolve8_neon, vpx_convolve8_avg_neon,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
-#else // HAVE_NEON
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
+ vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
+ vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+#else // HAVE_NEON
const ConvolveFunctions convolve8_neon(
- vpx_convolve_copy_neon, vpx_convolve_avg_neon,
- vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon,
- vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon,
- vpx_convolve8_neon, vpx_convolve8_avg_neon,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
+ vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
+ vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
#endif // HAVE_NEON_ASM
const ConvolveParam kArrayConvolve8_neon[] = { ALL_SIZES(convolve8_neon) };
@@ -1211,13 +1109,11 @@
// TODO(any): Make DSPR2 versions support 128x128 128x64 64x128 block sizes
#if HAVE_DSPR2 && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
const ConvolveFunctions convolve8_dspr2(
- vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2,
- vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2,
- vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2,
- vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, vpx_convolve8_horiz_dspr2,
+ vpx_convolve8_avg_horiz_dspr2, vpx_convolve8_vert_dspr2,
+ vpx_convolve8_avg_vert_dspr2, vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve8_dspr2[] = { ALL_SIZES(convolve8_dspr2) };
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest,
@@ -1227,13 +1123,11 @@
// TODO(any): Make MSA versions support 128x128 128x64 64x128 block sizes
#if HAVE_MSA && !(CONFIG_VP10 && CONFIG_EXT_PARTITION)
const ConvolveFunctions convolve8_msa(
- vpx_convolve_copy_msa, vpx_convolve_avg_msa,
- vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa,
- vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa,
- vpx_convolve8_msa, vpx_convolve8_avg_msa,
- vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c,
- vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
- vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
+ vpx_convolve_copy_msa, vpx_convolve_avg_msa, vpx_convolve8_horiz_msa,
+ vpx_convolve8_avg_horiz_msa, vpx_convolve8_vert_msa,
+ vpx_convolve8_avg_vert_msa, vpx_convolve8_msa, vpx_convolve8_avg_msa,
+ vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
+ vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve8_msa[] = { ALL_SIZES(convolve8_msa) };
INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest,
diff --git a/test/cpu_speed_test.cc b/test/cpu_speed_test.cc
index 3a9593b..2ed9113 100644
--- a/test/cpu_speed_test.cc
+++ b/test/cpu_speed_test.cc
@@ -23,10 +23,8 @@
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
CpuSpeedTest()
- : EncoderTest(GET_PARAM(0)),
- encoding_mode_(GET_PARAM(1)),
- set_cpu_used_(GET_PARAM(2)),
- min_psnr_(kMaxPSNR),
+ : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
+ set_cpu_used_(GET_PARAM(2)), min_psnr_(kMaxPSNR),
tune_content_(VPX_CONTENT_DEFAULT) {}
virtual ~CpuSpeedTest() {}
@@ -42,9 +40,7 @@
}
}
- virtual void BeginPassHook(unsigned int /*pass*/) {
- min_psnr_ = kMaxPSNR;
- }
+ virtual void BeginPassHook(unsigned int /*pass*/) { min_psnr_ = kMaxPSNR; }
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
@@ -61,8 +57,7 @@
}
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.psnr.psnr[0] < min_psnr_)
- min_psnr_ = pkt->data.psnr.psnr[0];
+ if (pkt->data.psnr.psnr[0] < min_psnr_) min_psnr_ = pkt->data.psnr.psnr[0];
}
void TestQ0();
@@ -173,12 +168,12 @@
TEST_P(CpuSpeedTestLarge, TestEncodeHighBitrate) { TestEncodeHighBitrate(); }
TEST_P(CpuSpeedTestLarge, TestLowBitrate) { TestLowBitrate(); }
-VP10_INSTANTIATE_TEST_CASE(
- CpuSpeedTest,
- ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(1, 3));
-VP10_INSTANTIATE_TEST_CASE(
- CpuSpeedTestLarge,
- ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(0, 1));
+VP10_INSTANTIATE_TEST_CASE(CpuSpeedTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood,
+ ::libvpx_test::kOnePassGood),
+ ::testing::Range(1, 3));
+VP10_INSTANTIATE_TEST_CASE(CpuSpeedTestLarge,
+ ::testing::Values(::libvpx_test::kTwoPassGood,
+ ::libvpx_test::kOnePassGood),
+ ::testing::Range(0, 1));
} // namespace
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index 89761e7..300b11e 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -18,8 +18,9 @@
namespace {
-class DatarateTestLarge : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+class DatarateTestLarge
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
public:
DatarateTestLarge() : EncoderTest(GET_PARAM(0)) {}
@@ -49,8 +50,7 @@
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 0)
- encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+ if (video->frame() == 0) encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
if (denoiser_offon_test_) {
ASSERT_GT(denoiser_offon_period_, 0)
@@ -74,8 +74,7 @@
if (duration > 1) {
// If first drop not set and we have a drop set it to this time.
- if (!first_drop_)
- first_drop_ = last_pts_ + 1;
+ if (!first_drop_) first_drop_ = last_pts_ + 1;
// Update the number of frame drops.
num_drops_ += static_cast<int>(duration - 1);
// Update counter for total number of frames (#frames input to encoder).
@@ -89,7 +88,7 @@
// Buffer should not go negative.
ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame "
- << pkt->data.frame.pts;
+ << pkt->data.frame.pts;
const size_t frame_size_in_bits = pkt->data.frame.sz * 8;
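
The underrun assertion above is the usual CBR leaky-bucket condition: each frame interval adds the per-frame bit budget, each encoded frame drains its actual size, and the level must never go negative. The sketch below is a generic illustration under that assumption, not the test's exact arithmetic, and BufferModel is an invented name:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct BufferModel {
  int64_t bits = 0;
  void AddBudget(double frame_duration_sec, int target_bitrate_kbps) {
    bits += static_cast<int64_t>(frame_duration_sec * target_bitrate_kbps * 1000);
  }
  void DrainFrame(size_t frame_bytes) {
    bits -= static_cast<int64_t>(frame_bytes) * 8;
    assert(bits >= 0 && "buffer underrun");
  }
};
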
@@ -194,7 +193,7 @@
ASSERT_LE(static_cast<double>(cfg_.rc_target_bitrate),
effective_datarate_ * 1.15)
<< " The datarate for the file missed the target!"
- << cfg_.rc_target_bitrate << " "<< effective_datarate_;
+ << cfg_.rc_target_bitrate << " " << effective_datarate_;
}
}
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index 7d1146b..d0e2b4b 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -54,16 +54,16 @@
double temp1, temp2;
// step 1
- step[ 0] = input[0] + input[15];
- step[ 1] = input[1] + input[14];
- step[ 2] = input[2] + input[13];
- step[ 3] = input[3] + input[12];
- step[ 4] = input[4] + input[11];
- step[ 5] = input[5] + input[10];
- step[ 6] = input[6] + input[ 9];
- step[ 7] = input[7] + input[ 8];
- step[ 8] = input[7] - input[ 8];
- step[ 9] = input[6] - input[ 9];
+ step[0] = input[0] + input[15];
+ step[1] = input[1] + input[14];
+ step[2] = input[2] + input[13];
+ step[3] = input[3] + input[12];
+ step[4] = input[4] + input[11];
+ step[5] = input[5] + input[10];
+ step[6] = input[6] + input[9];
+ step[7] = input[7] + input[8];
+ step[8] = input[7] - input[8];
+ step[9] = input[6] - input[9];
step[10] = input[5] - input[10];
step[11] = input[4] - input[11];
step[12] = input[3] - input[12];
@@ -81,13 +81,13 @@
output[6] = step[1] - step[6];
output[7] = step[0] - step[7];
- temp1 = step[ 8] * C7;
+ temp1 = step[8] * C7;
temp2 = step[15] * C9;
- output[ 8] = temp1 + temp2;
+ output[8] = temp1 + temp2;
- temp1 = step[ 9] * C11;
+ temp1 = step[9] * C11;
temp2 = step[14] * C5;
- output[ 9] = temp1 - temp2;
+ output[9] = temp1 - temp2;
temp1 = step[10] * C3;
temp2 = step[13] * C13;
@@ -105,40 +105,40 @@
temp2 = step[13] * C3;
output[13] = temp2 - temp1;
- temp1 = step[ 9] * C5;
+ temp1 = step[9] * C5;
temp2 = step[14] * C11;
output[14] = temp2 + temp1;
- temp1 = step[ 8] * C9;
+ temp1 = step[8] * C9;
temp2 = step[15] * C7;
output[15] = temp2 - temp1;
// step 3
- step[ 0] = output[0] + output[3];
- step[ 1] = output[1] + output[2];
- step[ 2] = output[1] - output[2];
- step[ 3] = output[0] - output[3];
+ step[0] = output[0] + output[3];
+ step[1] = output[1] + output[2];
+ step[2] = output[1] - output[2];
+ step[3] = output[0] - output[3];
temp1 = output[4] * C14;
temp2 = output[7] * C2;
- step[ 4] = temp1 + temp2;
+ step[4] = temp1 + temp2;
temp1 = output[5] * C10;
temp2 = output[6] * C6;
- step[ 5] = temp1 + temp2;
+ step[5] = temp1 + temp2;
temp1 = output[5] * C6;
temp2 = output[6] * C10;
- step[ 6] = temp2 - temp1;
+ step[6] = temp2 - temp1;
temp1 = output[4] * C2;
temp2 = output[7] * C14;
- step[ 7] = temp2 - temp1;
+ step[7] = temp2 - temp1;
- step[ 8] = output[ 8] + output[11];
- step[ 9] = output[ 9] + output[10];
- step[10] = output[ 9] - output[10];
- step[11] = output[ 8] - output[11];
+ step[8] = output[8] + output[11];
+ step[9] = output[9] + output[10];
+ step[10] = output[9] - output[10];
+ step[11] = output[8] - output[11];
step[12] = output[12] + output[15];
step[13] = output[13] + output[14];
@@ -146,25 +146,25 @@
step[15] = output[12] - output[15];
// step 4
- output[ 0] = (step[ 0] + step[ 1]);
- output[ 8] = (step[ 0] - step[ 1]);
+ output[0] = (step[0] + step[1]);
+ output[8] = (step[0] - step[1]);
temp1 = step[2] * C12;
temp2 = step[3] * C4;
temp1 = temp1 + temp2;
- output[ 4] = 2*(temp1 * C8);
+ output[4] = 2 * (temp1 * C8);
temp1 = step[2] * C4;
temp2 = step[3] * C12;
temp1 = temp2 - temp1;
output[12] = 2 * (temp1 * C8);
- output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
- output[14] = 2 * ((step[7] - step[ 6]) * C8);
+ output[2] = 2 * ((step[4] + step[5]) * C8);
+ output[14] = 2 * ((step[7] - step[6]) * C8);
temp1 = step[4] - step[5];
temp2 = step[6] + step[7];
- output[ 6] = (temp1 + temp2);
+ output[6] = (temp1 + temp2);
output[10] = (temp1 - temp2);
intermediate[8] = step[8] + step[14];
@@ -180,18 +180,18 @@
temp1 = temp2 + temp1;
output[13] = 2 * (temp1 * C8);
- output[ 9] = 2 * ((step[10] + step[11]) * C8);
+ output[9] = 2 * ((step[10] + step[11]) * C8);
intermediate[11] = step[10] - step[11];
intermediate[12] = step[12] + step[13];
intermediate[13] = step[12] - step[13];
- intermediate[14] = step[ 8] - step[14];
- intermediate[15] = step[ 9] - step[15];
+ intermediate[14] = step[8] - step[14];
+ intermediate[15] = step[9] - step[15];
output[15] = (intermediate[11] + intermediate[12]);
- output[ 1] = -(intermediate[11] - intermediate[12]);
+ output[1] = -(intermediate[11] - intermediate[12]);
- output[ 7] = 2 * (intermediate[13] * C8);
+ output[7] = 2 * (intermediate[13] * C8);
temp1 = intermediate[14] * C12;
temp2 = intermediate[15] * C4;
@@ -201,28 +201,24 @@
temp1 = intermediate[14] * C4;
temp2 = intermediate[15] * C12;
temp1 = temp2 + temp1;
- output[ 5] = 2 * (temp1 * C8);
+ output[5] = 2 * (temp1 * C8);
}
void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
// First transform columns
for (int i = 0; i < 16; ++i) {
double temp_in[16], temp_out[16];
- for (int j = 0; j < 16; ++j)
- temp_in[j] = input[j * 16 + i];
+ for (int j = 0; j < 16; ++j) temp_in[j] = input[j * 16 + i];
butterfly_16x16_dct_1d(temp_in, temp_out);
- for (int j = 0; j < 16; ++j)
- output[j * 16 + i] = temp_out[j];
+ for (int j = 0; j < 16; ++j) output[j * 16 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 16; ++i) {
double temp_in[16], temp_out[16];
- for (int j = 0; j < 16; ++j)
- temp_in[j] = output[j + i * 16];
+ for (int j = 0; j < 16; ++j) temp_in[j] = output[j + i * 16];
butterfly_16x16_dct_1d(temp_in, temp_out);
// Scale by some magic number
- for (int j = 0; j < 16; ++j)
- output[j + i * 16] = temp_out[j]/2;
+ for (int j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j] / 2;
}
}
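
reference_16x16_dct_2d above is the standard separable pattern: a 1-D transform over each column, then over each row, with a final scale to match the fixed-point forward transform. A generic sketch of that pattern (separable_2d and Transform1D are illustrative names):

#include <functional>
#include <vector>

using Transform1D = std::function<void(const double *, double *)>;

void separable_2d(const double *input, double *output, int n,
                  const Transform1D &tx, double final_scale) {
  std::vector<double> tmp_in(n), tmp_out(n);
  // First transform columns.
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) tmp_in[j] = input[j * n + i];
    tx(tmp_in.data(), tmp_out.data());
    for (int j = 0; j < n; ++j) output[j * n + i] = tmp_out[j];
  }
  // Then transform rows, applying the trailing scale (1/2 for 16x16, 1/4 for 32x32).
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) tmp_in[j] = output[i * n + j];
    tx(tmp_in.data(), tmp_out.data());
    for (int j = 0; j < n; ++j) output[i * n + j] = tmp_out[j] * final_scale;
  }
}
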
@@ -248,8 +244,7 @@
vpx_idct16x16_256_add_c(in, dest, stride);
}
-void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
- int tx_type) {
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp10_fht16x16_c(in, out, stride, tx_type);
}
@@ -351,11 +346,10 @@
}
}
- ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
- ASM_REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
@@ -366,18 +360,17 @@
for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
const int32_t diff =
- bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
+ bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
#else
const int32_t diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
total_error += error;
}
}
- EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
+ EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
<< "Error: 16x16 FHT/IHT has an individual round trip error > 1";
EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
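
The tolerance in the two checks above scales with bit depth: a squared error of 1 at 8 bits becomes 4^(bd-8) at higher depths, i.e. roughly one 8-bit LSB of pixel error regardless of depth. A small illustrative helper (MaxSquaredError is not a test identifier):

#include <cstdint>

constexpr uint32_t MaxSquaredError(int bit_depth) {
  return 1u << (2 * (bit_depth - 8));  // 1 at 8-bit, 16 at 10-bit, 256 at 12-bit
}
static_assert(MaxSquaredError(10) == 16, "");
static_assert(MaxSquaredError(12) == 256, "");
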
@@ -418,16 +411,14 @@
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0) {
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
} else if (i == 1) {
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
}
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunFwdTxfm(input_extreme_block, output_block, pitch_));
// The minimum quant value is 4.
for (int j = 0; j < kNumCoeffs; ++j) {
@@ -457,11 +448,9 @@
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0)
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
if (i == 1)
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
@@ -484,17 +473,15 @@
} else {
inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
tx_type_);
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
- CONVERT_TO_BYTEPTR(dst16), pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(output_ref_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
if (bit_depth_ == VPX_BITS_8) {
- for (int j = 0; j < kNumCoeffs; ++j)
- EXPECT_EQ(ref[j], dst[j]);
+ for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- for (int j = 0; j < kNumCoeffs; ++j)
- EXPECT_EQ(ref16[j], dst16[j]);
+ for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);
#endif
}
}
@@ -538,8 +525,8 @@
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
- 16));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16));
#endif // CONFIG_VP9_HIGHBITDEPTH
}
@@ -551,9 +538,8 @@
const uint32_t diff = dst[j] - src[j];
#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
- EXPECT_GE(1u, error)
- << "Error: 16x16 IDCT has error " << error
- << " at index " << j;
+ EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error
+ << " at index " << j;
}
}
}
@@ -595,8 +581,8 @@
} else {
#if CONFIG_VP9_HIGHBITDEPTH
ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
- pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif // CONFIG_VP9_HIGHBITDEPTH
}
@@ -608,9 +594,8 @@
const uint32_t diff = dst[j] - ref[j];
#endif // CONFIG_VP9_HIGHBITDEPTH
const uint32_t error = diff * diff;
- EXPECT_EQ(0u, error)
- << "Error: 16x16 IDCT Comparison has error " << error
- << " at index " << j;
+ EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error "
+ << error << " at index " << j;
}
}
}
@@ -623,32 +608,25 @@
IhtFunc inv_txfm_ref;
};
-class Trans16x16DCT
- : public Trans16x16TestBase,
- public ::testing::TestWithParam<Dct16x16Param> {
+class Trans16x16DCT : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Dct16x16Param> {
public:
virtual ~Trans16x16DCT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
+ tx_type_ = GET_PARAM(2);
bit_depth_ = GET_PARAM(3);
- pitch_ = 16;
+ pitch_ = 16;
fwd_txfm_ref = fdct16x16_ref;
inv_txfm_ref = idct16x16_ref;
mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth_) {
- case VPX_BITS_10:
- inv_txfm_ref = idct16x16_10_ref;
- break;
- case VPX_BITS_12:
- inv_txfm_ref = idct16x16_12_ref;
- break;
- default:
- inv_txfm_ref = idct16x16_ref;
- break;
+ case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
+ case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
+ default: inv_txfm_ref = idct16x16_ref; break;
}
#else
inv_txfm_ref = idct16x16_ref;
@@ -668,17 +646,11 @@
IdctFunc inv_txfm_;
};
-TEST_P(Trans16x16DCT, AccuracyCheck) {
- RunAccuracyCheck();
-}
+TEST_P(Trans16x16DCT, AccuracyCheck) { RunAccuracyCheck(); }
-TEST_P(Trans16x16DCT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(Trans16x16DCT, CoeffCheck) { RunCoeffCheck(); }
-TEST_P(Trans16x16DCT, MemCheck) {
- RunMemCheck();
-}
+TEST_P(Trans16x16DCT, MemCheck) { RunMemCheck(); }
TEST_P(Trans16x16DCT, QuantCheck) {
// Use maximally allowed quantization step sizes for DC and AC
@@ -686,36 +658,27 @@
RunQuantCheck(1336, 1828);
}
-TEST_P(Trans16x16DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
-}
+TEST_P(Trans16x16DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); }
-class Trans16x16HT
- : public Trans16x16TestBase,
- public ::testing::TestWithParam<Ht16x16Param> {
+class Trans16x16HT : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~Trans16x16HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
+ tx_type_ = GET_PARAM(2);
bit_depth_ = GET_PARAM(3);
- pitch_ = 16;
+ pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
inv_txfm_ref = iht16x16_ref;
mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
switch (bit_depth_) {
- case VPX_BITS_10:
- inv_txfm_ref = iht16x16_10;
- break;
- case VPX_BITS_12:
- inv_txfm_ref = iht16x16_12;
- break;
- default:
- inv_txfm_ref = iht16x16_ref;
- break;
+ case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break;
+ case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break;
+ default: inv_txfm_ref = iht16x16_ref; break;
}
#else
inv_txfm_ref = iht16x16_ref;
@@ -735,17 +698,11 @@
IhtFunc inv_txfm_;
};
-TEST_P(Trans16x16HT, AccuracyCheck) {
- RunAccuracyCheck();
-}
+TEST_P(Trans16x16HT, AccuracyCheck) { RunAccuracyCheck(); }
-TEST_P(Trans16x16HT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(Trans16x16HT, CoeffCheck) { RunCoeffCheck(); }
-TEST_P(Trans16x16HT, MemCheck) {
- RunMemCheck();
-}
+TEST_P(Trans16x16HT, MemCheck) { RunMemCheck(); }
TEST_P(Trans16x16HT, QuantCheck) {
// The encoder skips any non-DC intra prediction modes,
@@ -753,9 +710,8 @@
RunQuantCheck(429, 729);
}
-class InvTrans16x16DCT
- : public Trans16x16TestBase,
- public ::testing::TestWithParam<Idct16x16Param> {
+class InvTrans16x16DCT : public Trans16x16TestBase,
+ public ::testing::TestWithParam<Idct16x16Param> {
public:
virtual ~InvTrans16x16DCT() {}
@@ -766,7 +722,7 @@
bit_depth_ = GET_PARAM(3);
pitch_ = 16;
mask_ = (1 << bit_depth_) - 1;
-}
+ }
virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
@@ -784,9 +740,8 @@
CompareInvReference(ref_txfm_, thresh_);
}
-class PartialTrans16x16Test
- : public ::testing::TestWithParam<
- std::tr1::tuple<FdctFunc, vpx_bit_depth_t> > {
+class PartialTrans16x16Test : public ::testing::TestWithParam<
+ std::tr1::tuple<FdctFunc, vpx_bit_depth_t> > {
public:
virtual ~PartialTrans16x16Test() {}
virtual void SetUp() {
@@ -855,10 +810,10 @@
make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
#else
-INSTANTIATE_TEST_CASE_P(
- C, Trans16x16DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
+ ::testing::Values(make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_c,
+ 0, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH
@@ -898,28 +853,25 @@
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_neon,
+ 0, VPX_BITS_8)));
#endif
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct16x16_sse2,
- &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
- ::testing::Values(
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 0,
- VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 1,
- VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 2,
- VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 3,
- VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vp10_fht16x16_sse2,
+ &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2,
+ &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2,
+ &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2,
+ &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test,
::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2,
VPX_BITS_8)));
@@ -929,62 +881,56 @@
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vpx_highbd_fdct16x16_sse2,
- &idct16x16_10, 0, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct16x16_c,
- &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct16x16_sse2,
- &idct16x16_12, 0, VPX_BITS_12),
- make_tuple(&vpx_highbd_fdct16x16_c,
- &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
- make_tuple(&vpx_fdct16x16_sse2,
- &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
+ make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_10_sse2, 0,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_12_sse2, 0,
+ VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_sse2, &vpx_idct16x16_256_add_c, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
- ::testing::Values(
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2,
- &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c, 3,
- VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ 0, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ 1, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ 2, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+ 3, VPX_BITS_8)));
// Optimizations take effect at a threshold of 3155, so we use a value close to
// that to test both branches.
INSTANTIATE_TEST_CASE_P(
SSE2, InvTrans16x16DCT,
- ::testing::Values(
- make_tuple(&idct16x16_10_add_10_c,
- &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
- make_tuple(&idct16x16_10,
- &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
- make_tuple(&idct16x16_10_add_12_c,
- &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
- make_tuple(&idct16x16_12,
- &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
+ ::testing::Values(make_tuple(&idct16x16_10_add_10_c,
+ &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10, &idct16x16_256_add_10_sse2,
+ 3167, VPX_BITS_10),
+ make_tuple(&idct16x16_10_add_12_c,
+ &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
+ make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2,
+ 3167, VPX_BITS_12)));
INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test,
::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2,
VPX_BITS_8)));
#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- MSA, Trans16x16DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct16x16_msa,
- &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
+ ::testing::Values(make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa,
+ 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
MSA, Trans16x16HT,
- ::testing::Values(
- make_tuple(&vp10_fht16x16_msa,
- &vp10_iht16x16_256_add_msa, 0, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa,
- &vp10_iht16x16_256_add_msa, 1, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa,
- &vp10_iht16x16_256_add_msa, 2, VPX_BITS_8),
- make_tuple(&vp10_fht16x16_msa,
- &vp10_iht16x16_256_add_msa, 3, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ 0, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ 1, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ 2, VPX_BITS_8),
+ make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+ 3, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(MSA, PartialTrans16x16Test,
::testing::Values(make_tuple(&vpx_fdct16x16_1_msa,
VPX_BITS_8)));
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index 88d64ec3..b59ab03 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -39,8 +39,7 @@
out[k] = 0.0;
for (int n = 0; n < 32; n++)
out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0);
- if (k == 0)
- out[k] = out[k] * kInvSqrt2;
+ if (k == 0) out[k] = out[k] * kInvSqrt2;
}
}
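
For reference, the 1-D transform in the hunk above is the unnormalized DCT-II with the DC term scaled by 1/sqrt(2); written out in LaTeX:

  \mathrm{out}_k = c_k \sum_{n=0}^{31} \mathrm{in}_n \cos\frac{(2n+1)k\pi}{64},
  \qquad c_0 = \frac{1}{\sqrt{2}}, \quad c_k = 1 \text{ otherwise},

which is exactly what the loop over n computes before the column/row passes and the final division by 4 in reference_32x32_dct_2d below.
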
@@ -49,21 +48,17 @@
// First transform columns
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
- for (int j = 0; j < 32; ++j)
- temp_in[j] = input[j*32 + i];
+ for (int j = 0; j < 32; ++j) temp_in[j] = input[j * 32 + i];
reference_32x32_dct_1d(temp_in, temp_out);
- for (int j = 0; j < 32; ++j)
- output[j * 32 + i] = temp_out[j];
+ for (int j = 0; j < 32; ++j) output[j * 32 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 32; ++i) {
double temp_in[32], temp_out[32];
- for (int j = 0; j < 32; ++j)
- temp_in[j] = output[j + i*32];
+ for (int j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
reference_32x32_dct_1d(temp_in, temp_out);
// Scale by some magic number
- for (int j = 0; j < 32; ++j)
- output[j + i * 32] = temp_out[j] / 4;
+ for (int j = 0; j < 32; ++j) output[j + i * 32] = temp_out[j] / 4;
}
}
@@ -89,8 +84,8 @@
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- version_ = GET_PARAM(2); // 0: high precision forward transform
- // 1: low precision version for rd loop
+ version_ = GET_PARAM(2); // 0: high precision forward transform
+ // 1: low precision version for rd loop
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
}
@@ -140,8 +135,8 @@
ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block,
- CONVERT_TO_BYTEPTR(dst16), 32));
+ ASM_REGISTER_STATE_CHECK(
+ inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32));
#endif
}
@@ -153,8 +148,7 @@
const int32_t diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
total_error += error;
}
}
@@ -213,11 +207,9 @@
input_extreme_block[j] = rnd.Rand8() & 1 ? mask_ : -mask_;
}
if (i == 0) {
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_;
} else if (i == 1) {
- for (int j = 0; j < kNumCoeffs; ++j)
- input_extreme_block[j] = -mask_;
+ for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_;
}
const int stride = 32;
@@ -291,9 +283,8 @@
const int diff = dst[j] - src[j];
#endif
const int error = diff * diff;
- EXPECT_GE(1, error)
- << "Error: 32x32 IDCT has error " << error
- << " at index " << j;
+ EXPECT_GE(1, error) << "Error: 32x32 IDCT has error " << error
+ << " at index " << j;
}
}
}
@@ -365,18 +356,13 @@
INSTANTIATE_TEST_CASE_P(
C, Trans32x32Test,
::testing::Values(
- make_tuple(&vpx_highbd_fdct32x32_c,
- &idct32x32_10, 0, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct32x32_rd_c,
- &idct32x32_10, 1, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct32x32_c,
- &idct32x32_12, 0, VPX_BITS_12),
- make_tuple(&vpx_highbd_fdct32x32_rd_c,
- &idct32x32_12, 1, VPX_BITS_12),
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_c,
- &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+ make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_10, 1, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_12, 1, VPX_BITS_12),
+ make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c, 1,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
C, PartialTrans32x32Test,
::testing::Values(make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_8),
@@ -385,11 +371,10 @@
#else
INSTANTIATE_TEST_CASE_P(
C, Trans32x32Test,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_c,
- &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0,
+ VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c,
+ 1, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(C, PartialTrans32x32Test,
::testing::Values(make_tuple(&vpx_fdct32x32_1_c,
VPX_BITS_8)));
@@ -398,21 +383,19 @@
#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans32x32Test,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_neon, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_c,
- &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_neon,
+ 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_c,
+ &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans32x32Test,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_sse2,
- &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_sse2,
- &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_sse2,
+ &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_sse2,
+ &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans32x32Test,
::testing::Values(make_tuple(&vpx_fdct32x32_1_sse2,
VPX_BITS_8)));
@@ -440,21 +423,19 @@
#if HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
AVX2, Trans32x32Test,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_avx2,
- &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_avx2,
- &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_avx2,
+ &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_avx2,
+ &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
#endif // HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
MSA, Trans32x32Test,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_msa,
- &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
- make_tuple(&vpx_fdct32x32_rd_msa,
- &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_msa,
+ &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vpx_fdct32x32_rd_msa,
+ &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(MSA, PartialTrans32x32Test,
::testing::Values(make_tuple(&vpx_fdct32x32_1_msa,
VPX_BITS_8)));
diff --git a/test/decode_api_test.cc b/test/decode_api_test.cc
index a819da5..6571154 100644
--- a/test/decode_api_test.cc
+++ b/test/decode_api_test.cc
@@ -25,7 +25,7 @@
&vpx_codec_vp10_dx_algo,
#endif
};
- uint8_t buf[1] = {0};
+ uint8_t buf[1] = { 0 };
vpx_codec_ctx_t dec;
EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_dec_init(NULL, NULL, NULL, 0));
@@ -48,8 +48,7 @@
vpx_codec_decode(&dec, buf, NELEMENTS(buf), NULL, 0));
EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
vpx_codec_decode(&dec, NULL, NELEMENTS(buf), NULL, 0));
- EXPECT_EQ(VPX_CODEC_INVALID_PARAM,
- vpx_codec_decode(&dec, buf, 0, NULL, 0));
+ EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(&dec, buf, 0, NULL, 0));
EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec));
}
diff --git a/test/decode_perf_test.cc b/test/decode_perf_test.cc
index 000f9c8..d8bfcbe 100644
--- a/test/decode_perf_test.cc
+++ b/test/decode_perf_test.cc
@@ -70,8 +70,7 @@
power/temp/min max frame decode times/etc
*/
-class DecodePerfTest : public ::testing::TestWithParam<DecodePerfParam> {
-};
+class DecodePerfTest : public ::testing::TestWithParam<DecodePerfParam> {};
TEST_P(DecodePerfTest, PerfTest) {
const char *const video_name = GET_PARAM(VIDEO_NAME);
@@ -92,8 +91,7 @@
}
vpx_usec_timer_mark(&t);
- const double elapsed_secs = double(vpx_usec_timer_elapsed(&t))
- / kUsecsInSec;
+ const double elapsed_secs = double(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
const unsigned frames = video.frame_number();
const double fps = double(frames) / elapsed_secs;
@@ -111,17 +109,13 @@
INSTANTIATE_TEST_CASE_P(VP9, DecodePerfTest,
::testing::ValuesIn(kVP9DecodePerfVectors));
-class VP9NewEncodeDecodePerfTest :
- public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class VP9NewEncodeDecodePerfTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
VP9NewEncodeDecodePerfTest()
- : EncoderTest(GET_PARAM(0)),
- encoding_mode_(GET_PARAM(1)),
- speed_(0),
- outfile_(0),
- out_frames_(0) {
- }
+ : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), speed_(0),
+ outfile_(0), out_frames_(0) {}
virtual ~VP9NewEncodeDecodePerfTest() {}
@@ -182,9 +176,7 @@
virtual bool DoDecode() { return false; }
- void set_speed(unsigned int speed) {
- speed_ = speed;
- }
+ void set_speed(unsigned int speed) { speed_ = speed; }
private:
libvpx_test::TestMode encoding_mode_;
@@ -196,10 +188,7 @@
struct EncodePerfTestVideo {
EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_,
uint32_t bitrate_, int frames_)
- : name(name_),
- width(width_),
- height(height_),
- bitrate(bitrate_),
+ : name(name_), width(width_), height(height_), bitrate(bitrate_),
frames(frames_) {}
const char *name;
uint32_t width;
@@ -225,10 +214,8 @@
const char *video_name = kVP9EncodePerfTestVectors[i].name;
libvpx_test::I420VideoSource video(
- video_name,
- kVP9EncodePerfTestVectors[i].width,
- kVP9EncodePerfTestVectors[i].height,
- timebase.den, timebase.num, 0,
+ video_name, kVP9EncodePerfTestVectors[i].width,
+ kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
kVP9EncodePerfTestVectors[i].frames);
set_speed(2);
@@ -268,6 +255,6 @@
printf("}\n");
}
-VP10_INSTANTIATE_TEST_CASE(
- VP9NewEncodeDecodePerfTest, ::testing::Values(::libvpx_test::kTwoPassGood));
+VP10_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood));
} // namespace
diff --git a/test/decode_test_driver.cc b/test/decode_test_driver.cc
index 7fb3e37..a62ddbd 100644
--- a/test/decode_test_driver.cc
+++ b/test/decode_test_driver.cc
@@ -22,9 +22,8 @@
vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size,
vpx_codec_stream_info_t *stream_info) {
- return vpx_codec_peek_stream_info(CodecInterface(),
- cxdata, static_cast<unsigned int>(size),
- stream_info);
+ return vpx_codec_peek_stream_info(
+ CodecInterface(), cxdata, static_cast<unsigned int>(size), stream_info);
}
vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) {
@@ -36,9 +35,8 @@
vpx_codec_err_t res_dec;
InitOnce();
API_REGISTER_STATE_CHECK(
- res_dec = vpx_codec_decode(&decoder_,
- cxdata, static_cast<unsigned int>(size),
- user_priv, 0));
+ res_dec = vpx_codec_decode(
+ &decoder_, cxdata, static_cast<unsigned int>(size), user_priv, 0));
return res_dec;
}
@@ -73,7 +71,7 @@
void DecoderTest::RunLoop(CompressedVideoSource *video,
const vpx_codec_dec_cfg_t &dec_cfg) {
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
+ Decoder *const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
ASSERT_TRUE(decoder != NULL);
bool end_of_file = false;
@@ -86,16 +84,14 @@
stream_info.sz = sizeof(stream_info);
if (video->cxdata() != NULL) {
- const vpx_codec_err_t res_peek = decoder->PeekStream(video->cxdata(),
- video->frame_size(),
- &stream_info);
+ const vpx_codec_err_t res_peek = decoder->PeekStream(
+ video->cxdata(), video->frame_size(), &stream_info);
HandlePeekResult(decoder, video, res_peek);
ASSERT_FALSE(::testing::Test::HasFailure());
- vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(),
- video->frame_size());
- if (!HandleDecodeResult(res_dec, *video, decoder))
- break;
+ vpx_codec_err_t res_dec =
+ decoder->DecodeFrame(video->cxdata(), video->frame_size());
+ if (!HandleDecodeResult(res_dec, *video, decoder)) break;
} else {
// Signal end of the file to the decoder.
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
@@ -122,8 +118,6 @@
memcpy(&cfg_, &dec_cfg, sizeof(cfg_));
}
-void DecoderTest::set_flags(const vpx_codec_flags_t flags) {
- flags_ = flags;
-}
+void DecoderTest::set_flags(const vpx_codec_flags_t flags) { flags_ = flags; }
} // namespace libvpx_test
diff --git a/test/decode_test_driver.h b/test/decode_test_driver.h
index 1492c5a..e6ef853 100644
--- a/test/decode_test_driver.h
+++ b/test/decode_test_driver.h
@@ -26,13 +26,11 @@
explicit DxDataIterator(vpx_codec_ctx_t *decoder)
: decoder_(decoder), iter_(NULL) {}
- const vpx_image_t *Next() {
- return vpx_codec_get_frame(decoder_, &iter_);
- }
+ const vpx_image_t *Next() { return vpx_codec_get_frame(decoder_, &iter_); }
private:
- vpx_codec_ctx_t *decoder_;
- vpx_codec_iter_t iter_;
+ vpx_codec_ctx_t *decoder_;
+ vpx_codec_iter_t iter_;
};
// Provides a simplified interface to manage one video decoding.
@@ -47,13 +45,14 @@
Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag,
unsigned long deadline) // NOLINT
- : cfg_(cfg), flags_(flag), deadline_(deadline), init_done_(false) {
+ : cfg_(cfg),
+ flags_(flag),
+ deadline_(deadline),
+ init_done_(false) {
memset(&decoder_, 0, sizeof(decoder_));
}
- virtual ~Decoder() {
- vpx_codec_destroy(&decoder_);
- }
+ virtual ~Decoder() { vpx_codec_destroy(&decoder_); }
vpx_codec_err_t PeekStream(const uint8_t *cxdata, size_t size,
vpx_codec_stream_info_t *stream_info);
@@ -63,17 +62,11 @@
vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size,
void *user_priv);
- DxDataIterator GetDxData() {
- return DxDataIterator(&decoder_);
- }
+ DxDataIterator GetDxData() { return DxDataIterator(&decoder_); }
- void set_deadline(unsigned long deadline) {
- deadline_ = deadline;
- }
+ void set_deadline(unsigned long deadline) { deadline_ = deadline; }
- void Control(int ctrl_id, int arg) {
- Control(ctrl_id, arg, VPX_CODEC_OK);
- }
+ void Control(int ctrl_id, int arg) { Control(ctrl_id, arg, VPX_CODEC_OK); }
void Control(int ctrl_id, const void *arg) {
InitOnce();
@@ -87,7 +80,7 @@
ASSERT_EQ(expected_value, res) << DecodeError();
}
- const char* DecodeError() {
+ const char *DecodeError() {
const char *detail = vpx_codec_error_detail(&decoder_);
return detail ? detail : vpx_codec_error(&decoder_);
}
@@ -97,11 +90,11 @@
vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_release_frame_buffer_cb_fn_t cb_release, void *user_priv) {
InitOnce();
- return vpx_codec_set_frame_buffer_functions(
- &decoder_, cb_get, cb_release, user_priv);
+ return vpx_codec_set_frame_buffer_functions(&decoder_, cb_get, cb_release,
+ user_priv);
}
- const char* GetDecoderName() const {
+ const char *GetDecoderName() const {
return vpx_codec_iface_name(CodecInterface());
}
@@ -109,28 +102,25 @@
bool IsVP10() const;
- vpx_codec_ctx_t * GetDecoder() {
- return &decoder_;
- }
+ vpx_codec_ctx_t *GetDecoder() { return &decoder_; }
protected:
- virtual vpx_codec_iface_t* CodecInterface() const = 0;
+ virtual vpx_codec_iface_t *CodecInterface() const = 0;
void InitOnce() {
if (!init_done_) {
- const vpx_codec_err_t res = vpx_codec_dec_init(&decoder_,
- CodecInterface(),
- &cfg_, flags_);
+ const vpx_codec_err_t res =
+ vpx_codec_dec_init(&decoder_, CodecInterface(), &cfg_, flags_);
ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError();
init_done_ = true;
}
}
- vpx_codec_ctx_t decoder_;
+ vpx_codec_ctx_t decoder_;
vpx_codec_dec_cfg_t cfg_;
- vpx_codec_flags_t flags_;
- unsigned int deadline_;
- bool init_done_;
+ vpx_codec_flags_t flags_;
+ unsigned int deadline_;
+ bool init_done_;
};
// Common test functionality for all Decoder tests.
@@ -145,37 +135,35 @@
virtual void set_flags(const vpx_codec_flags_t flags);
// Hook to be called before decompressing every frame.
- virtual void PreDecodeFrameHook(const CompressedVideoSource& /*video*/,
- Decoder* /*decoder*/) {}
+ virtual void PreDecodeFrameHook(const CompressedVideoSource & /*video*/,
+ Decoder * /*decoder*/) {}
// Hook to be called to handle decode result. Return true to continue.
virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
- const CompressedVideoSource& /*video*/,
+ const CompressedVideoSource & /*video*/,
Decoder *decoder) {
EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
return VPX_CODEC_OK == res_dec;
}
// Hook to be called on every decompressed frame.
- virtual void DecompressedFrameHook(const vpx_image_t& /*img*/,
+ virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
const unsigned int /*frame_number*/) {}
// Hook to be called on peek result
- virtual void HandlePeekResult(Decoder* const decoder,
+ virtual void HandlePeekResult(Decoder *const decoder,
CompressedVideoSource *video,
const vpx_codec_err_t res_peek);
protected:
explicit DecoderTest(const CodecFactory *codec)
- : codec_(codec),
- cfg_(),
- flags_(0) {}
+ : codec_(codec), cfg_(), flags_(0) {}
virtual ~DecoderTest() {}
const CodecFactory *codec_;
vpx_codec_dec_cfg_t cfg_;
- vpx_codec_flags_t flags_;
+ vpx_codec_flags_t flags_;
};
} // namespace libvpx_test
diff --git a/test/denoiser_sse2_test.cc b/test/denoiser_sse2_test.cc
index 793cd17..daca510 100644
--- a/test/denoiser_sse2_test.cc
+++ b/test/denoiser_sse2_test.cc
@@ -33,9 +33,7 @@
public:
virtual ~VP9DenoiserTest() {}
- virtual void SetUp() {
- bs_ = GetParam();
- }
+ virtual void SetUp() { bs_ = GetParam(); }
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -68,19 +66,19 @@
sig_block[j] = rnd.Rand8();
// The pixels in mc_avg_block are generated by adding a random
// number in range [-19, 19] to corresponding pixels in sig_block.
- temp = sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) *
- (rnd.Rand8() % 20);
+ temp =
+ sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) * (rnd.Rand8() % 20);
// Clip.
mc_avg_block[j] = (temp < 0) ? 0 : ((temp > 255) ? 255 : temp);
}
- ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(
- sig_block, 64, mc_avg_block, 64, avg_block_c,
- 64, 0, bs_, motion_magnitude_random));
+ ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(sig_block, 64, mc_avg_block,
+ 64, avg_block_c, 64, 0, bs_,
+ motion_magnitude_random));
ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_sse2(
- sig_block, 64, mc_avg_block, 64, avg_block_sse2,
- 64, 0, bs_, motion_magnitude_random));
+ sig_block, 64, mc_avg_block, 64, avg_block_sse2, 64, 0, bs_,
+ motion_magnitude_random));
// Test bitexactness.
for (int h = 0; h < (4 << b_height_log2_lookup[bs_]); ++h) {
@@ -92,9 +90,9 @@
}
// Test for all block size.
-INSTANTIATE_TEST_CASE_P(
- SSE2, VP9DenoiserTest,
- ::testing::Values(BLOCK_8X8, BLOCK_8X16, BLOCK_16X8, BLOCK_16X16,
- BLOCK_16X32, BLOCK_32X16, BLOCK_32X32, BLOCK_32X64,
- BLOCK_64X32, BLOCK_64X64));
+INSTANTIATE_TEST_CASE_P(SSE2, VP9DenoiserTest,
+ ::testing::Values(BLOCK_8X8, BLOCK_8X16, BLOCK_16X8,
+ BLOCK_16X16, BLOCK_16X32, BLOCK_32X16,
+ BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
+ BLOCK_64X64));
} // namespace
diff --git a/test/encode_api_test.cc b/test/encode_api_test.cc
index 9e99bb8..6c1d5e3 100644
--- a/test/encode_api_test.cc
+++ b/test/encode_api_test.cc
@@ -24,7 +24,7 @@
&vpx_codec_vp10_cx_algo,
#endif
};
- uint8_t buf[1] = {0};
+ uint8_t buf[1] = { 0 };
vpx_image_t img;
vpx_codec_ctx_t enc;
vpx_codec_enc_cfg_t cfg;
diff --git a/test/encode_perf_test.cc b/test/encode_perf_test.cc
index 69be9ea..2411dcd 100644
--- a/test/encode_perf_test.cc
+++ b/test/encode_perf_test.cc
@@ -26,10 +26,7 @@
struct EncodePerfTestVideo {
EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_,
uint32_t bitrate_, int frames_)
- : name(name_),
- width(width_),
- height(height_),
- bitrate(bitrate_),
+ : name(name_), width(width_), height(height_), bitrate(bitrate_),
frames(frames_) {}
const char *name;
uint32_t width;
@@ -45,8 +42,8 @@
EncodePerfTestVideo("macmarcostationary_640_480_30.yuv", 640, 480, 200, 718),
EncodePerfTestVideo("niklas_640_480_30.yuv", 640, 480, 200, 471),
EncodePerfTestVideo("tacomanarrows_640_480_30.yuv", 640, 480, 200, 300),
- EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv",
- 640, 480, 200, 300),
+ EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv", 640, 480, 200,
+ 300),
EncodePerfTestVideo("thaloundeskmtg_640_480_30.yuv", 640, 480, 200, 300),
EncodePerfTestVideo("niklas_1280_720_30.yuv", 1280, 720, 600, 470),
};
@@ -61,12 +58,8 @@
public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
VP9EncodePerfTest()
- : EncoderTest(GET_PARAM(0)),
- min_psnr_(kMaxPsnr),
- nframes_(0),
- encoding_mode_(GET_PARAM(1)),
- speed_(0),
- threads_(1) {}
+ : EncoderTest(GET_PARAM(0)), min_psnr_(kMaxPsnr), nframes_(0),
+ encoding_mode_(GET_PARAM(1)), speed_(0), threads_(1) {}
virtual ~VP9EncodePerfTest() {}
@@ -107,24 +100,18 @@
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
if (pkt->data.psnr.psnr[0] < min_psnr_) {
- min_psnr_= pkt->data.psnr.psnr[0];
+ min_psnr_ = pkt->data.psnr.psnr[0];
}
}
// for performance reasons don't decode
virtual bool DoDecode() { return 0; }
- double min_psnr() const {
- return min_psnr_;
- }
+ double min_psnr() const { return min_psnr_; }
- void set_speed(unsigned int speed) {
- speed_ = speed;
- }
+ void set_speed(unsigned int speed) { speed_ = speed; }
- void set_threads(unsigned int threads) {
- threads_ = threads;
- }
+ void set_threads(unsigned int threads) { threads_ = threads; }
private:
double min_psnr_;
@@ -157,10 +144,8 @@
const unsigned frames = kVP9EncodePerfTestVectors[i].frames;
const char *video_name = kVP9EncodePerfTestVectors[i].name;
libvpx_test::I420VideoSource video(
- video_name,
- kVP9EncodePerfTestVectors[i].width,
- kVP9EncodePerfTestVectors[i].height,
- timebase.den, timebase.num, 0,
+ video_name, kVP9EncodePerfTestVectors[i].width,
+ kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0,
kVP9EncodePerfTestVectors[i].frames);
set_speed(kEncodePerfTestSpeeds[j]);
@@ -197,6 +182,6 @@
}
}
-VP10_INSTANTIATE_TEST_CASE(
- VP9EncodePerfTest, ::testing::Values(::libvpx_test::kRealTime));
+VP10_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
+ ::testing::Values(::libvpx_test::kRealTime));
} // namespace
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index a8a76c5..61044ee 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -31,14 +31,13 @@
cfg_.g_timebase = video->timebase();
cfg_.rc_twopass_stats_in = stats_->buf();
- res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_,
- init_flags_);
+ res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
#if CONFIG_VP10_ENCODER
if (CodecInterface() == &vpx_codec_vp10_cx_algo) {
- // Default to 1 tile column for VP10. With CONFIG_EXT_TILE, the
- // default is already the largest possible tile size
+// Default to 1 tile column for VP10. With CONFIG_EXT_TILE, the
+// default is already the largest possible tile size
#if !CONFIG_EXT_TILE
const int log2_tile_columns = 0;
res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
@@ -62,8 +61,7 @@
CxDataIterator iter = GetCxData();
while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) {
- if (pkt->kind != VPX_CODEC_STATS_PKT)
- continue;
+ if (pkt->kind != VPX_CODEC_STATS_PKT) continue;
stats_->Append(*pkt);
}
@@ -83,15 +81,15 @@
}
// Encode the frame
- API_REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
- frame_flags, deadline_));
+ API_REGISTER_STATE_CHECK(res = vpx_codec_encode(&encoder_, img, video.pts(),
+ video.duration(), frame_flags,
+ deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
void Encoder::Flush() {
- const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0,
- deadline_);
+ const vpx_codec_err_t res =
+ vpx_codec_encode(&encoder_, NULL, 0, 0, 0, deadline_);
if (!encoder_.priv)
ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError();
else
@@ -106,22 +104,15 @@
void EncoderTest::SetMode(TestMode mode) {
switch (mode) {
- case kRealTime:
- deadline_ = VPX_DL_REALTIME;
- break;
+ case kRealTime: deadline_ = VPX_DL_REALTIME; break;
case kOnePassGood:
- case kTwoPassGood:
- deadline_ = VPX_DL_GOOD_QUALITY;
- break;
+ case kTwoPassGood: deadline_ = VPX_DL_GOOD_QUALITY; break;
case kOnePassBest:
- case kTwoPassBest:
- deadline_ = VPX_DL_BEST_QUALITY;
- break;
+ case kTwoPassBest: deadline_ = VPX_DL_BEST_QUALITY; break;
- default:
- ASSERT_TRUE(false) << "Unexpected mode " << mode;
+ default: ASSERT_TRUE(false) << "Unexpected mode " << mode;
}
if (mode == kTwoPassGood || mode == kTwoPassBest)
@@ -132,10 +123,8 @@
static bool compare_plane(const uint8_t *const buf1, const int stride1,
const uint8_t *const buf2, const int stride2,
- const int w, const int h,
- int *const mismatch_row,
- int *const mismatch_col,
- int *const mismatch_pix1,
+ const int w, const int h, int *const mismatch_row,
+ int *const mismatch_col, int *const mismatch_pix1,
int *const mismatch_pix2) {
int r, c;
@@ -145,14 +134,10 @@
const int pix2 = buf2[r * stride2 + c];
if (pix1 != pix2) {
- if (mismatch_row != NULL)
- *mismatch_row = r;
- if (mismatch_col != NULL)
- *mismatch_col = c;
- if (mismatch_pix1 != NULL)
- *mismatch_pix1 = pix1;
- if (mismatch_pix2 != NULL)
- *mismatch_pix2 = pix2;
+ if (mismatch_row != NULL) *mismatch_row = r;
+ if (mismatch_col != NULL) *mismatch_col = c;
+ if (mismatch_pix1 != NULL) *mismatch_pix1 = pix1;
+ if (mismatch_pix2 != NULL) *mismatch_pix2 = pix2;
return false;
}
}
@@ -163,86 +148,67 @@
// The function should return "true" most of the time, therefore no early
// break-out is implemented within the match checking process.
-static bool compare_img(const vpx_image_t *img1,
- const vpx_image_t *img2,
- int *const mismatch_row,
- int *const mismatch_col,
- int *const mismatch_plane,
- int *const mismatch_pix1,
+static bool compare_img(const vpx_image_t *img1, const vpx_image_t *img2,
+ int *const mismatch_row, int *const mismatch_col,
+ int *const mismatch_plane, int *const mismatch_pix1,
int *const mismatch_pix2) {
-
const unsigned int w_y = img1->d_w;
const unsigned int h_y = img1->d_h;
const unsigned int w_uv = ROUND_POWER_OF_TWO(w_y, img1->x_chroma_shift);
const unsigned int h_uv = ROUND_POWER_OF_TWO(h_y, img1->y_chroma_shift);
- if (img1->fmt != img2->fmt
- || img1->cs != img2->cs
- || img1->d_w != img2->d_w
- || img1->d_h != img2->d_h) {
- if (mismatch_row != NULL)
- *mismatch_row = -1;
- if (mismatch_col != NULL)
- *mismatch_col = -1;
+ if (img1->fmt != img2->fmt || img1->cs != img2->cs ||
+ img1->d_w != img2->d_w || img1->d_h != img2->d_h) {
+ if (mismatch_row != NULL) *mismatch_row = -1;
+ if (mismatch_col != NULL) *mismatch_col = -1;
return false;
}
- if (!compare_plane(img1->planes[VPX_PLANE_Y], img1->stride[VPX_PLANE_Y],
- img2->planes[VPX_PLANE_Y], img2->stride[VPX_PLANE_Y],
- w_y, h_y,
- mismatch_row, mismatch_col,
- mismatch_pix1, mismatch_pix2)) {
- if (mismatch_plane != NULL)
- *mismatch_plane = VPX_PLANE_Y;
+ if (!compare_plane(img1->planes[VPX_PLANE_Y], img1->stride[VPX_PLANE_Y],
+ img2->planes[VPX_PLANE_Y], img2->stride[VPX_PLANE_Y], w_y,
+ h_y, mismatch_row, mismatch_col, mismatch_pix1,
+ mismatch_pix2)) {
+ if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_Y;
return false;
}
- if (!compare_plane(img1->planes[VPX_PLANE_U], img1->stride[VPX_PLANE_U],
- img2->planes[VPX_PLANE_U], img2->stride[VPX_PLANE_U],
- w_uv, h_uv,
- mismatch_row, mismatch_col,
- mismatch_pix1, mismatch_pix2)) {
- if (mismatch_plane != NULL)
- *mismatch_plane = VPX_PLANE_U;
+ if (!compare_plane(img1->planes[VPX_PLANE_U], img1->stride[VPX_PLANE_U],
+ img2->planes[VPX_PLANE_U], img2->stride[VPX_PLANE_U], w_uv,
+ h_uv, mismatch_row, mismatch_col, mismatch_pix1,
+ mismatch_pix2)) {
+ if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_U;
return false;
}
- if (!compare_plane(img1->planes[VPX_PLANE_V], img1->stride[VPX_PLANE_V],
- img2->planes[VPX_PLANE_V], img2->stride[VPX_PLANE_V],
- w_uv, h_uv,
- mismatch_row, mismatch_col,
- mismatch_pix1, mismatch_pix2)) {
- if (mismatch_plane != NULL)
- *mismatch_plane = VPX_PLANE_U;
+ if (!compare_plane(img1->planes[VPX_PLANE_V], img1->stride[VPX_PLANE_V],
+ img2->planes[VPX_PLANE_V], img2->stride[VPX_PLANE_V], w_uv,
+ h_uv, mismatch_row, mismatch_col, mismatch_pix1,
+ mismatch_pix2)) {
+ if (mismatch_plane != NULL) *mismatch_plane = VPX_PLANE_U;
return false;
}
return true;
}
-void EncoderTest::MismatchHook(const vpx_image_t* img_enc,
- const vpx_image_t* img_dec) {
+void EncoderTest::MismatchHook(const vpx_image_t *img_enc,
+ const vpx_image_t *img_dec) {
int mismatch_row = 0;
int mismatch_col = 0;
int mismatch_plane = 0;
int mismatch_pix_enc = 0;
int mismatch_pix_dec = 0;
- ASSERT_FALSE(compare_img(img_enc, img_dec,
- &mismatch_row, &mismatch_col,
- &mismatch_plane,
- &mismatch_pix_enc,
+ ASSERT_FALSE(compare_img(img_enc, img_dec, &mismatch_row, &mismatch_col,
+ &mismatch_plane, &mismatch_pix_enc,
&mismatch_pix_dec));
- GTEST_FAIL()
- << "Encode/Decode mismatch found:"
- << std::endl
- << " pixel value enc/dec: " << mismatch_pix_enc << "/" << mismatch_pix_dec
- << std::endl
- << " plane: " << mismatch_plane
- << std::endl
- << " row/col: " << mismatch_row << "/" << mismatch_col
- << std::endl;
+ GTEST_FAIL() << "Encode/Decode mismatch found:" << std::endl
+ << " pixel value enc/dec: " << mismatch_pix_enc << "/"
+ << mismatch_pix_dec << std::endl
+ << " plane: " << mismatch_plane << std::endl
+ << " row/col: " << mismatch_row << "/"
+ << mismatch_col << std::endl;
}
void EncoderTest::RunLoop(VideoSource *video) {
@@ -306,10 +272,9 @@
has_cxdata = true;
if (decoder.get() != NULL && DoDecode()) {
vpx_codec_err_t res_dec = decoder->DecodeFrame(
- (const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz);
+ (const uint8_t *)pkt->data.frame.buf, pkt->data.frame.sz);
- if (!HandleDecodeResult(res_dec, *video, decoder.get()))
- break;
+ if (!HandleDecodeResult(res_dec, *video, decoder.get())) break;
has_dxdata = true;
}
@@ -318,20 +283,16 @@
FramePktHook(pkt);
break;
- case VPX_CODEC_PSNR_PKT:
- PSNRPktHook(pkt);
- break;
+ case VPX_CODEC_PSNR_PKT: PSNRPktHook(pkt); break;
- default:
- break;
+ default: break;
}
}
// Flush the decoder when there are no more fragments.
if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) {
const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0);
- if (!HandleDecodeResult(res_dec, *video, decoder.get()))
- break;
+ if (!HandleDecodeResult(res_dec, *video, decoder.get())) break;
}
if (has_dxdata && has_cxdata) {
@@ -339,23 +300,20 @@
DxDataIterator dec_iter = decoder->GetDxData();
const vpx_image_t *img_dec = dec_iter.Next();
if (img_enc && img_dec) {
- const bool res = compare_img(img_enc, img_dec,
- NULL, NULL, NULL, NULL, NULL);
+ const bool res =
+ compare_img(img_enc, img_dec, NULL, NULL, NULL, NULL, NULL);
if (!res) { // Mismatch
MismatchHook(img_enc, img_dec);
}
}
- if (img_dec)
- DecompressedFrameHook(*img_dec, video->pts());
+ if (img_dec) DecompressedFrameHook(*img_dec, video->pts());
}
- if (!Continue())
- break;
+ if (!Continue()) break;
}
EndPassHook();
- if (!Continue())
- break;
+ if (!Continue()) break;
}
}
diff --git a/test/encode_test_driver.h b/test/encode_test_driver.h
index 720ec84..94abd03 100644
--- a/test/encode_test_driver.h
+++ b/test/encode_test_driver.h
@@ -33,19 +33,17 @@
kTwoPassGood,
kTwoPassBest
};
-#define ALL_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \
- ::libvpx_test::kOnePassGood, \
- ::libvpx_test::kOnePassBest, \
- ::libvpx_test::kTwoPassGood, \
- ::libvpx_test::kTwoPassBest)
+#define ALL_TEST_MODES \
+ ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood, \
+ ::libvpx_test::kOnePassBest, ::libvpx_test::kTwoPassGood, \
+ ::libvpx_test::kTwoPassBest)
-#define ONE_PASS_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \
- ::libvpx_test::kOnePassGood, \
- ::libvpx_test::kOnePassBest)
+#define ONE_PASS_TEST_MODES \
+ ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood, \
+ ::libvpx_test::kOnePassBest)
-#define TWO_PASS_TEST_MODES ::testing::Values(::libvpx_test::kTwoPassGood, \
- ::libvpx_test::kTwoPassBest)
-
+#define TWO_PASS_TEST_MODES \
+ ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kTwoPassBest)
// Provides an object to handle the libvpx get_cx_data() iteration pattern
class CxDataIterator {
@@ -58,8 +56,8 @@
}
private:
- vpx_codec_ctx_t *encoder_;
- vpx_codec_iter_t iter_;
+ vpx_codec_ctx_t *encoder_;
+ vpx_codec_iter_t iter_;
};
// Implements an in-memory store for libvpx twopass statistics
@@ -75,15 +73,12 @@
return buf;
}
- void Reset() {
- buffer_.clear();
- }
+ void Reset() { buffer_.clear(); }
protected:
- std::string buffer_;
+ std::string buffer_;
};
-
// Provides a simplified interface to manage one video encoding pass, given
// a configuration and video source.
//
@@ -97,13 +92,9 @@
memset(&encoder_, 0, sizeof(encoder_));
}
- virtual ~Encoder() {
- vpx_codec_destroy(&encoder_);
- }
+ virtual ~Encoder() { vpx_codec_destroy(&encoder_); }
- CxDataIterator GetCxData() {
- return CxDataIterator(&encoder_);
- }
+ CxDataIterator GetCxData() { return CxDataIterator(&encoder_); }
void InitEncoder(VideoSource *video);
@@ -115,9 +106,7 @@
void EncodeFrame(VideoSource *video, const unsigned long frame_flags);
// Convenience wrapper for EncodeFrame()
- void EncodeFrame(VideoSource *video) {
- EncodeFrame(video, 0);
- }
+ void EncodeFrame(VideoSource *video) { EncodeFrame(video, 0); }
void Control(int ctrl_id, int arg) {
const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg);
@@ -147,12 +136,10 @@
cfg_ = *cfg;
}
- void set_deadline(unsigned long deadline) {
- deadline_ = deadline;
- }
+ void set_deadline(unsigned long deadline) { deadline_ = deadline; }
protected:
- virtual vpx_codec_iface_t* CodecInterface() const = 0;
+ virtual vpx_codec_iface_t *CodecInterface() const = 0;
const char *EncoderError() {
const char *detail = vpx_codec_error_detail(&encoder_);
@@ -166,11 +153,11 @@
// Flush the encoder on EOS
void Flush();
- vpx_codec_ctx_t encoder_;
- vpx_codec_enc_cfg_t cfg_;
- unsigned long deadline_;
- unsigned long init_flags_;
- TwopassStatsStore *stats_;
+ vpx_codec_ctx_t encoder_;
+ vpx_codec_enc_cfg_t cfg_;
+ unsigned long deadline_;
+ unsigned long init_flags_;
+ TwopassStatsStore *stats_;
};
// Common test functionality for all Encoder tests.
@@ -212,36 +199,35 @@
virtual void EndPassHook() {}
// Hook to be called before encoding a frame.
- virtual void PreEncodeFrameHook(VideoSource* /*video*/) {}
- virtual void PreEncodeFrameHook(VideoSource* /*video*/,
- Encoder* /*encoder*/) {}
+ virtual void PreEncodeFrameHook(VideoSource * /*video*/) {}
+ virtual void PreEncodeFrameHook(VideoSource * /*video*/,
+ Encoder * /*encoder*/) {}
// Hook to be called on every compressed data packet.
- virtual void FramePktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {}
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
// Hook to be called on every PSNR packet.
- virtual void PSNRPktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {}
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {}
// Hook to determine whether the encode loop should continue.
virtual bool Continue() const {
return !(::testing::Test::HasFatalFailure() || abort_);
}
- const CodecFactory *codec_;
+ const CodecFactory *codec_;
// Hook to determine whether to decode frame after encoding
virtual bool DoDecode() const { return 1; }
// Hook to handle encode/decode mismatch
- virtual void MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2);
+ virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2);
// Hook to be called on every decompressed frame.
- virtual void DecompressedFrameHook(const vpx_image_t& /*img*/,
+ virtual void DecompressedFrameHook(const vpx_image_t & /*img*/,
vpx_codec_pts_t /*pts*/) {}
// Hook to be called to handle decode result. Return true to continue.
virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
- const VideoSource& /*video*/,
+ const VideoSource & /*video*/,
Decoder *decoder) {
EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
return VPX_CODEC_OK == res_dec;
@@ -253,15 +239,15 @@
return pkt;
}
- bool abort_;
- vpx_codec_enc_cfg_t cfg_;
- vpx_codec_dec_cfg_t dec_cfg_;
- unsigned int passes_;
- unsigned long deadline_;
- TwopassStatsStore stats_;
- unsigned long init_flags_;
- unsigned long frame_flags_;
- vpx_codec_pts_t last_pts_;
+ bool abort_;
+ vpx_codec_enc_cfg_t cfg_;
+ vpx_codec_dec_cfg_t dec_cfg_;
+ unsigned int passes_;
+ unsigned long deadline_;
+ TwopassStatsStore stats_;
+ unsigned long init_flags_;
+ unsigned long frame_flags_;
+ vpx_codec_pts_t last_pts_;
};
} // namespace libvpx_test
diff --git a/test/encoder_parms_get_to_decoder.cc b/test/encoder_parms_get_to_decoder.cc
index ea4640c..8af0b7d 100644
--- a/test/encoder_parms_get_to_decoder.cc
+++ b/test/encoder_parms_get_to_decoder.cc
@@ -29,7 +29,7 @@
};
const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
- {"niklas_1280_720_30.y4m", 1280, 720, 600, 10},
+ { "niklas_1280_720_30.y4m", 1280, 720, 600, 10 },
};
struct EncodeParameters {
@@ -45,10 +45,10 @@
};
const EncodeParameters kVP9EncodeParameterSet[] = {
- {0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601, { 0, 0 }},
- {0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709, { 0, 0 }},
- {0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020, { 0, 0 }},
- {0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 }},
+ { 0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601, { 0, 0 } },
+ { 0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709, { 0, 0 } },
+ { 0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020, { 0, 0 } },
+ { 0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 } },
// TODO(JBB): Test profiles (requires more work).
};
@@ -144,6 +144,6 @@
}
VP10_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
- ::testing::ValuesIn(kVP9EncodeParameterSet),
- ::testing::ValuesIn(kVP9EncodePerfTestVectors));
+ ::testing::ValuesIn(kVP9EncodeParameterSet),
+ ::testing::ValuesIn(kVP9EncodePerfTestVectors));
} // namespace
diff --git a/test/end_to_end_test.cc b/test/end_to_end_test.cc
index b858008..fe5c479 100644
--- a/test/end_to_end_test.cc
+++ b/test/end_to_end_test.cc
@@ -18,7 +18,7 @@
namespace {
-const unsigned int kWidth = 160;
+const unsigned int kWidth = 160;
const unsigned int kHeight = 90;
const unsigned int kFramerate = 50;
const unsigned int kFrames = 10;
@@ -30,23 +30,15 @@
// We make two cases here to enable the testing and
// guard picture quality.
#if CONFIG_VP10_ENCODER && CONFIG_VP9_HIGHBITDEPTH
- { 36.0, 37.0, 37.0, 37.0, 37.0 },
- { 31.0, 36.0, 36.0, 36.0, 36.0 },
- { 31.0, 35.0, 35.0, 35.0, 35.0 },
- { 31.0, 34.0, 34.0, 34.0, 34.0 },
- { 31.0, 33.0, 33.0, 33.0, 33.0 },
- { 31.0, 32.0, 32.0, 32.0, 32.0 },
- { 30.0, 31.0, 31.0, 31.0, 31.0 },
- { 29.0, 30.0, 30.0, 30.0, 30.0 },
+ { 36.0, 37.0, 37.0, 37.0, 37.0 }, { 31.0, 36.0, 36.0, 36.0, 36.0 },
+ { 31.0, 35.0, 35.0, 35.0, 35.0 }, { 31.0, 34.0, 34.0, 34.0, 34.0 },
+ { 31.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 },
+ { 30.0, 31.0, 31.0, 31.0, 31.0 }, { 29.0, 30.0, 30.0, 30.0, 30.0 },
#else
- { 36.0, 37.0, 37.0, 37.0, 37.0 },
- { 35.0, 36.0, 36.0, 36.0, 36.0 },
- { 34.0, 35.0, 35.0, 35.0, 35.0 },
- { 33.0, 34.0, 34.0, 34.0, 34.0 },
- { 32.0, 33.0, 33.0, 33.0, 33.0 },
- { 31.0, 32.0, 32.0, 32.0, 32.0 },
- { 30.0, 31.0, 31.0, 31.0, 31.0 },
- { 29.0, 30.0, 30.0, 30.0, 30.0 },
+ { 36.0, 37.0, 37.0, 37.0, 37.0 }, { 35.0, 36.0, 36.0, 36.0, 36.0 },
+ { 34.0, 35.0, 35.0, 35.0, 35.0 }, { 33.0, 34.0, 34.0, 34.0, 34.0 },
+ { 32.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 },
+ { 30.0, 31.0, 31.0, 31.0, 31.0 }, { 29.0, 30.0, 30.0, 30.0, 30.0 },
#endif // CONFIG_VP9_HIGHBITDEPTH && CONFIG_VP10_ENCODER
};
@@ -59,31 +51,30 @@
} TestVideoParam;
const TestVideoParam kTestVectors[] = {
- {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0},
- {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1},
- {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1},
- {"park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1},
+ { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0 },
+ { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 },
+ { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
+ { "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 },
#if CONFIG_VP9_HIGHBITDEPTH
- {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2},
- {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3},
- {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3},
- {"park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3},
- {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2},
- {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3},
- {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3},
- {"park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3},
+ { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 },
+ { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 },
+ { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 },
+ { "park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3 },
+ { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2 },
+ { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 },
+ { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 },
+ { "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 },
#endif // CONFIG_VP9_HIGHBITDEPTH
};
// Encoding modes tested
const libvpx_test::TestMode kEncodingModeVectors[] = {
- ::libvpx_test::kTwoPassGood,
- ::libvpx_test::kOnePassGood,
+ ::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood,
::libvpx_test::kRealTime,
};
// Speed settings tested
-const int kCpuUsedVectors[] = {1, 2, 3, 5, 6};
+const int kCpuUsedVectors[] = { 1, 2, 3, 5, 6 };
int is_extension_y4m(const char *filename) {
const char *dot = strrchr(filename, '.');
@@ -95,17 +86,13 @@
class EndToEndTestLarge
: public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith3Params<libvpx_test::TestMode, \
+ public ::libvpx_test::CodecTestWith3Params<libvpx_test::TestMode,
TestVideoParam, int> {
protected:
EndToEndTestLarge()
- : EncoderTest(GET_PARAM(0)),
- test_video_param_(GET_PARAM(2)),
- cpu_used_(GET_PARAM(3)),
- psnr_(0.0),
- nframes_(0),
- encoding_mode_(GET_PARAM(1)) {
- }
+ : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(2)),
+ cpu_used_(GET_PARAM(3)), psnr_(0.0), nframes_(0),
+ encoding_mode_(GET_PARAM(1)) {}
virtual ~EndToEndTestLarge() {}
@@ -156,8 +143,7 @@
}
double GetAveragePsnr() const {
- if (nframes_)
- return psnr_ / nframes_;
+ if (nframes_) return psnr_ / nframes_;
return 0.0;
}
@@ -181,30 +167,26 @@
cfg_.g_input_bit_depth = test_video_param_.input_bit_depth;
cfg_.g_bit_depth = test_video_param_.bit_depth;
init_flags_ = VPX_CODEC_USE_PSNR;
- if (cfg_.g_bit_depth > 8)
- init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
+ if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH;
libvpx_test::VideoSource *video;
if (is_extension_y4m(test_video_param_.filename)) {
- video = new libvpx_test::Y4mVideoSource(test_video_param_.filename,
- 0, kFrames);
+ video =
+ new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0, kFrames);
} else {
video = new libvpx_test::YUVVideoSource(test_video_param_.filename,
- test_video_param_.fmt,
- kWidth, kHeight,
- kFramerate, 1, 0, kFrames);
+ test_video_param_.fmt, kWidth,
+ kHeight, kFramerate, 1, 0, kFrames);
}
ASSERT_NO_FATAL_FAILURE(RunLoop(video));
const double psnr = GetAveragePsnr();
EXPECT_GT(psnr, GetPsnrThreshold());
- delete(video);
+ delete (video);
}
-
-VP10_INSTANTIATE_TEST_CASE(
- EndToEndTestLarge,
- ::testing::ValuesIn(kEncodingModeVectors),
- ::testing::ValuesIn(kTestVectors),
- ::testing::ValuesIn(kCpuUsedVectors));
+VP10_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
+ ::testing::ValuesIn(kEncodingModeVectors),
+ ::testing::ValuesIn(kTestVectors),
+ ::testing::ValuesIn(kCpuUsedVectors));
} // namespace
diff --git a/test/error_block_test.cc b/test/error_block_test.cc
index 629ba97..90130ca 100644
--- a/test/error_block_test.cc
+++ b/test/error_block_test.cc
@@ -32,20 +32,18 @@
typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff,
const tran_low_t *dqcoeff,
- intptr_t block_size,
- int64_t *ssz, int bps);
+ intptr_t block_size, int64_t *ssz, int bps);
typedef std::tr1::tuple<ErrorBlockFunc, ErrorBlockFunc, vpx_bit_depth_t>
- ErrorBlockParam;
+ ErrorBlockParam;
-class ErrorBlockTest
- : public ::testing::TestWithParam<ErrorBlockParam> {
+class ErrorBlockTest : public ::testing::TestWithParam<ErrorBlockParam> {
public:
virtual ~ErrorBlockTest() {}
virtual void SetUp() {
- error_block_op_ = GET_PARAM(0);
+ error_block_op_ = GET_PARAM(0);
ref_error_block_op_ = GET_PARAM(1);
- bit_depth_ = GET_PARAM(2);
+ bit_depth_ = GET_PARAM(2);
}
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -76,18 +74,18 @@
// can be used for optimization, so generate test input precisely.
if (rnd(2)) {
// Positive number
- coeff[j] = rnd(1 << msb);
+ coeff[j] = rnd(1 << msb);
dqcoeff[j] = rnd(1 << msb);
} else {
// Negative number
- coeff[j] = -rnd(1 << msb);
+ coeff[j] = -rnd(1 << msb);
dqcoeff[j] = -rnd(1 << msb);
}
}
- ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz,
- bit_depth_);
- ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size,
- &ssz, bit_depth_));
+ ref_ret =
+ ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_);
+ ASM_REGISTER_STATE_CHECK(
+ ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_));
err_count += (ref_ret != ret) | (ref_ssz != ssz);
if (err_count && !err_count_total) {
first_failure = i;
@@ -117,35 +115,35 @@
int k = (i / 9) % 9;
// Change the maximum coeff value, to test different bit boundaries
- if ( k == 8 && (i % 9) == 0 ) {
+ if (k == 8 && (i % 9) == 0) {
max_val >>= 1;
}
block_size = 16 << (i % 9); // All block sizes from 4x4, 8x4 ..64x64
for (int j = 0; j < block_size; j++) {
if (k < 4) {
// Test at positive maximum values
- coeff[j] = k % 2 ? max_val : 0;
+ coeff[j] = k % 2 ? max_val : 0;
dqcoeff[j] = (k >> 1) % 2 ? max_val : 0;
} else if (k < 8) {
// Test at negative maximum values
- coeff[j] = k % 2 ? -max_val : 0;
+ coeff[j] = k % 2 ? -max_val : 0;
dqcoeff[j] = (k >> 1) % 2 ? -max_val : 0;
} else {
if (rnd(2)) {
// Positive number
- coeff[j] = rnd(1 << 14);
+ coeff[j] = rnd(1 << 14);
dqcoeff[j] = rnd(1 << 14);
} else {
// Negative number
- coeff[j] = -rnd(1 << 14);
+ coeff[j] = -rnd(1 << 14);
dqcoeff[j] = -rnd(1 << 14);
}
}
}
- ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz,
- bit_depth_);
- ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size,
- &ssz, bit_depth_));
+ ref_ret =
+ ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_);
+ ASM_REGISTER_STATE_CHECK(
+ ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_));
err_count += (ref_ret != ret) | (ref_ssz != ssz);
if (err_count && !err_count_total) {
first_failure = i;
@@ -162,13 +160,12 @@
INSTANTIATE_TEST_CASE_P(
SSE2, ErrorBlockTest,
- ::testing::Values(
- make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_10),
- make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_12),
- make_tuple(&vp10_highbd_block_error_sse2,
- &vp10_highbd_block_error_c, VPX_BITS_8)));
+ ::testing::Values(make_tuple(&vp10_highbd_block_error_sse2,
+ &vp10_highbd_block_error_c, VPX_BITS_10),
+ make_tuple(&vp10_highbd_block_error_sse2,
+ &vp10_highbd_block_error_c, VPX_BITS_12),
+ make_tuple(&vp10_highbd_block_error_sse2,
+ &vp10_highbd_block_error_c, VPX_BITS_8)));
#endif // HAVE_SSE2
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index 99419ad..2c1392c 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -19,16 +19,13 @@
const int kMaxErrorFrames = 12;
const int kMaxDroppableFrames = 12;
-class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class ErrorResilienceTestLarge
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
ErrorResilienceTestLarge()
- : EncoderTest(GET_PARAM(0)),
- psnr_(0.0),
- nframes_(0),
- mismatch_psnr_(0.0),
- mismatch_nframes_(0),
- encoding_mode_(GET_PARAM(1)) {
+ : EncoderTest(GET_PARAM(0)), psnr_(0.0), nframes_(0), mismatch_psnr_(0.0),
+ mismatch_nframes_(0), encoding_mode_(GET_PARAM(1)) {
Reset();
}
@@ -59,17 +56,15 @@
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
::libvpx_test::Encoder * /*encoder*/) {
- frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF);
+ frame_flags_ &=
+ ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF);
if (droppable_nframes_ > 0 &&
(cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) {
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
- std::cout << "Encoding droppable frame: "
- << droppable_frames_[i] << "\n";
- frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST |
- VP8_EFLAG_NO_UPD_GF |
+ std::cout << "Encoding droppable frame: " << droppable_frames_[i]
+ << "\n";
+ frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF);
return;
}
@@ -78,14 +73,12 @@
}
double GetAveragePsnr() const {
- if (nframes_)
- return psnr_ / nframes_;
+ if (nframes_) return psnr_ / nframes_;
return 0.0;
}
double GetAverageMismatchPsnr() const {
- if (mismatch_nframes_)
- return mismatch_psnr_ / mismatch_nframes_;
+ if (mismatch_nframes_) return mismatch_psnr_ / mismatch_nframes_;
return 0.0;
}
@@ -103,8 +96,7 @@
return 1;
}
- virtual void MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+ virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
double mismatch_psnr = compute_psnr(img1, img2);
mismatch_psnr_ += mismatch_psnr;
++mismatch_nframes_;
@@ -132,13 +124,9 @@
droppable_frames_[i] = list[i];
}
- unsigned int GetMismatchFrames() {
- return mismatch_nframes_;
- }
+ unsigned int GetMismatchFrames() { return mismatch_nframes_; }
- void SetPatternSwitch(int frame_switch) {
- pattern_switch_ = frame_switch;
- }
+ void SetPatternSwitch(int frame_switch) { pattern_switch_ = frame_switch; }
private:
double psnr_;
@@ -209,15 +197,14 @@
// In addition to isolated loss/drop, add a long consecutive series
// (of size 9) of dropped frames.
unsigned int num_droppable_frames = 11;
- unsigned int droppable_frame_list[] = {5, 16, 22, 23, 24, 25, 26, 27, 28,
- 29, 30};
+ unsigned int droppable_frame_list[] = { 5, 16, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30 };
SetDroppableFrames(num_droppable_frames, droppable_frame_list);
SetErrorFrames(num_droppable_frames, droppable_frame_list);
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
// Test that no mismatches have been found
- std::cout << " Mismatch frames: "
- << GetMismatchFrames() << "\n";
- EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+ std::cout << " Mismatch frames: " << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
// Reset previously set of error/droppable frames.
Reset();
diff --git a/test/ethread_test.cc b/test/ethread_test.cc
index cb0497a..6b80ccc 100644
--- a/test/ethread_test.cc
+++ b/test/ethread_test.cc
@@ -23,10 +23,8 @@
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
VPxEncoderThreadTest()
- : EncoderTest(GET_PARAM(0)),
- encoder_initialized_(false),
- encoding_mode_(GET_PARAM(1)),
- set_cpu_used_(GET_PARAM(2)) {
+ : EncoderTest(GET_PARAM(0)), encoder_initialized_(false),
+ encoding_mode_(GET_PARAM(1)), set_cpu_used_(GET_PARAM(2)) {
init_flags_ = VPX_CODEC_USE_PSNR;
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.w = 1280;
@@ -43,9 +41,7 @@
md5_dec_.clear();
md5_enc_.clear();
}
- virtual ~VPxEncoderThreadTest() {
- delete decoder_;
- }
+ virtual ~VPxEncoderThreadTest() { delete decoder_; }
virtual void SetUp() {
InitializeConfig();
@@ -104,12 +100,12 @@
size_enc_.push_back(pkt->data.frame.sz);
::libvpx_test::MD5 md5_enc;
- md5_enc.Add(reinterpret_cast<uint8_t*>(pkt->data.frame.buf),
+ md5_enc.Add(reinterpret_cast<uint8_t *>(pkt->data.frame.buf),
pkt->data.frame.sz);
md5_enc_.push_back(md5_enc.Get());
const vpx_codec_err_t res = decoder_->DecodeFrame(
- reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+ reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
if (res != VPX_CODEC_OK) {
abort_ = true;
ASSERT_EQ(VPX_CODEC_OK, res);
@@ -169,24 +165,19 @@
std::vector<std::string> md5_dec_;
};
-TEST_P(VPxEncoderThreadTest, EncoderResultTest) {
- DoTest();
-}
+TEST_P(VPxEncoderThreadTest, EncoderResultTest) { DoTest(); }
class VPxEncoderThreadTestLarge : public VPxEncoderThreadTest {};
-TEST_P(VPxEncoderThreadTestLarge, EncoderResultTest) {
- DoTest();
-}
+TEST_P(VPxEncoderThreadTestLarge, EncoderResultTest) { DoTest(); }
+VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood,
+ ::libvpx_test::kOnePassGood),
+ ::testing::Range(3, 9));
-VP10_INSTANTIATE_TEST_CASE(
- VPxEncoderThreadTest,
- ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(3, 9));
-
-VP10_INSTANTIATE_TEST_CASE(
- VPxEncoderThreadTestLarge,
- ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood),
- ::testing::Range(1, 3));
+VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTestLarge,
+ ::testing::Values(::libvpx_test::kTwoPassGood,
+ ::libvpx_test::kOnePassGood),
+ ::testing::Range(1, 3));
} // namespace
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index fe9ffcc..00114f4 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -36,9 +36,8 @@
using libvpx_test::FhtFunc;
typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t, int>
-Dct4x4Param;
-typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int>
-Ht4x4Param;
+ Dct4x4Param;
+typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht4x4Param;
void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride,
int /*tx_type*/) {
@@ -90,18 +89,16 @@
#endif // HAVE_SSE2
#endif // CONFIG_VP9_HIGHBITDEPTH
-
-class Trans4x4DCT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Dct4x4Param> {
+class Trans4x4DCT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4DCT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 4;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
fwd_txfm_ref = fdct4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -121,33 +118,24 @@
IdctFunc inv_txfm_;
};
-TEST_P(Trans4x4DCT, AccuracyCheck) {
- RunAccuracyCheck(1);
-}
+TEST_P(Trans4x4DCT, AccuracyCheck) { RunAccuracyCheck(1); }
-TEST_P(Trans4x4DCT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(Trans4x4DCT, CoeffCheck) { RunCoeffCheck(); }
-TEST_P(Trans4x4DCT, MemCheck) {
- RunMemCheck();
-}
+TEST_P(Trans4x4DCT, MemCheck) { RunMemCheck(); }
-TEST_P(Trans4x4DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck(1);
-}
+TEST_P(Trans4x4DCT, InvAccuracyCheck) { RunInvAccuracyCheck(1); }
-class Trans4x4HT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Ht4x4Param> {
+class Trans4x4HT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~Trans4x4HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 4;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -168,33 +156,24 @@
IhtFunc inv_txfm_;
};
-TEST_P(Trans4x4HT, AccuracyCheck) {
- RunAccuracyCheck(1);
-}
+TEST_P(Trans4x4HT, AccuracyCheck) { RunAccuracyCheck(1); }
-TEST_P(Trans4x4HT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(Trans4x4HT, CoeffCheck) { RunCoeffCheck(); }
-TEST_P(Trans4x4HT, MemCheck) {
- RunMemCheck();
-}
+TEST_P(Trans4x4HT, MemCheck) { RunMemCheck(); }
-TEST_P(Trans4x4HT, InvAccuracyCheck) {
- RunInvAccuracyCheck(1);
-}
+TEST_P(Trans4x4HT, InvAccuracyCheck) { RunInvAccuracyCheck(1); }
-class Trans4x4WHT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Dct4x4Param> {
+class Trans4x4WHT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Dct4x4Param> {
public:
virtual ~Trans4x4WHT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 4;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
fwd_txfm_ref = fwht4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -214,21 +193,13 @@
IdctFunc inv_txfm_;
};
-TEST_P(Trans4x4WHT, AccuracyCheck) {
- RunAccuracyCheck(0);
-}
+TEST_P(Trans4x4WHT, AccuracyCheck) { RunAccuracyCheck(0); }
-TEST_P(Trans4x4WHT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(Trans4x4WHT, CoeffCheck) { RunCoeffCheck(); }
-TEST_P(Trans4x4WHT, MemCheck) {
- RunMemCheck();
-}
+TEST_P(Trans4x4WHT, MemCheck) { RunMemCheck(); }
-TEST_P(Trans4x4WHT, InvAccuracyCheck) {
- RunInvAccuracyCheck(0);
-}
+TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); }
using std::tr1::make_tuple;
#if CONFIG_VP9_HIGHBITDEPTH
@@ -239,10 +210,10 @@
make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12, 16),
make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8, 16)));
#else
-INSTANTIATE_TEST_CASE_P(
- C, Trans4x4DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT,
+ ::testing::Values(make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_c, 0,
+ VPX_BITS_8, 16)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH
@@ -279,107 +250,93 @@
make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12, 16),
make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8, 16)));
#else
-INSTANTIATE_TEST_CASE_P(
- C, Trans4x4WHT,
- ::testing::Values(
- make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
+ ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+ &vpx_iwht4x4_16_add_c, 0,
+ VPX_BITS_8, 16)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- NEON, Trans4x4DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct4x4_c,
- &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
+ ::testing::Values(make_tuple(&vpx_fdct4x4_c,
+ &vpx_idct4x4_16_add_neon,
+ 0, VPX_BITS_8, 16)));
#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon,
- 0, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon,
- 1, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon,
- 2, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon,
- 3, VPX_BITS_8, 16)));
+ make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 0, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 1, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 2, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 3, VPX_BITS_8,
+ 16)));
#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4WHT,
- ::testing::Values(
- make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c,
- 0, VPX_BITS_8, 16),
- make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_sse2,
- 0, VPX_BITS_8, 16)));
+ ::testing::Values(make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0,
+ VPX_BITS_8, 16)));
#endif
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- SSE2, Trans4x4DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct4x4_sse2,
- &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
+ ::testing::Values(make_tuple(&vpx_fdct4x4_sse2,
+ &vpx_idct4x4_16_add_sse2,
+ 0, VPX_BITS_8, 16)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
- ::testing::Values(
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
- VPX_BITS_8, 16)));
+ ::testing::Values(make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
+ VPX_BITS_8, 16)));
#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4DCT,
::testing::Values(
- make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0,
- VPX_BITS_10, 16),
- make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0,
- VPX_BITS_10, 16),
- make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0,
- VPX_BITS_12, 16),
- make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0,
- VPX_BITS_12, 16),
- make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0,
- VPX_BITS_8, 16)));
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10, 16),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10,
+ 16),
+ make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12, 16),
+ make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12,
+ 16),
+ make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8,
+ 16)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans4x4HT,
::testing::Values(
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c,
- 0, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c,
- 1, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c,
- 2, VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c,
- 3, VPX_BITS_8, 16)));
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8,
+ 16)));
#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- MSA, Trans4x4DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0,
- VPX_BITS_8, 16)));
+INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
+ ::testing::Values(make_tuple(&vpx_fdct4x4_msa,
+ &vpx_idct4x4_16_add_msa, 0,
+ VPX_BITS_8, 16)));
INSTANTIATE_TEST_CASE_P(
MSA, Trans4x4HT,
- ::testing::Values(
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 0,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 1,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 2,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 3,
- VPX_BITS_8, 16)));
+ ::testing::Values(make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 0,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 1,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 2,
+ VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 3,
+ VPX_BITS_8, 16)));
#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 123ff53..cfdf519 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -53,8 +53,7 @@
out[k] = 0.0;
for (int n = 0; n < 8; n++)
out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0);
- if (k == 0)
- out[k] = out[k] * kInvSqrt2;
+ if (k == 0) out[k] = out[k] * kInvSqrt2;
}
}
@@ -63,25 +62,20 @@
// First transform columns
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = input[j*8 + i];
+ for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i];
reference_8x8_dct_1d(temp_in, temp_out);
- for (int j = 0; j < 8; ++j)
- output[j * 8 + i] = temp_out[j];
+ for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = output[j + i*8];
+ for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8];
reference_8x8_dct_1d(temp_in, temp_out);
// Scale by some magic number
- for (int j = 0; j < 8; ++j)
- output[j + i * 8] = temp_out[j] * 2;
+ for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j] * 2;
}
}
-
void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride,
int /*tx_type*/) {
vpx_fdct8x8_c(in, out, stride);
@@ -177,8 +171,7 @@
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-255, 255] at index " << j
<< " count0: " << count_sign_block[j][0]
- << " count1: " << count_sign_block[j][1]
- << " diff: " << diff;
+ << " count1: " << count_sign_block[j][1] << " diff: " << diff;
}
memset(count_sign_block, 0, sizeof(count_sign_block));
@@ -186,8 +179,8 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_ / 16, mask_ / 16].
for (int j = 0; j < 64; ++j)
- test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) -
- ((rnd.Rand16() & mask_) >> 4);
+ test_input_block[j] =
+ ((rnd.Rand16() & mask_) >> 4) - ((rnd.Rand16() & mask_) >> 4);
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_output_block, pitch_));
@@ -207,8 +200,7 @@
<< 1. * max_diff / count_test_block * 100 << "%"
<< " for input range [-15, 15] at index " << j
<< " count0: " << count_sign_block[j][0]
- << " count1: " << count_sign_block[j][1]
- << " diff: " << diff;
+ << " count1: " << count_sign_block[j][1] << " diff: " << diff;
}
}
@@ -245,19 +237,18 @@
ASM_REGISTER_STATE_CHECK(
RunFwdTxfm(test_input_block, test_temp_block, pitch_));
for (int j = 0; j < 64; ++j) {
- if (test_temp_block[j] > 0) {
- test_temp_block[j] += 2;
- test_temp_block[j] /= 4;
- test_temp_block[j] *= 4;
- } else {
- test_temp_block[j] -= 2;
- test_temp_block[j] /= 4;
- test_temp_block[j] *= 4;
- }
+ if (test_temp_block[j] > 0) {
+ test_temp_block[j] += 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ } else {
+ test_temp_block[j] -= 2;
+ test_temp_block[j] /= 4;
+ test_temp_block[j] *= 4;
+ }
}
if (bit_depth_ == VPX_BITS_8) {
- ASM_REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
@@ -273,19 +264,18 @@
const int diff = dst[j] - src[j];
#endif
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
total_error += error;
}
}
EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error)
- << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
- << " roundtrip error > 1";
+ << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual"
+ << " roundtrip error > 1";
- EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
- << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
- << "error > 1/5 per block";
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error)
+ << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip "
+ << "error > 1/5 per block";
}
void RunExtremalCheck() {
@@ -341,8 +331,7 @@
ASM_REGISTER_STATE_CHECK(
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
if (bit_depth_ == VPX_BITS_8) {
- ASM_REGISTER_STATE_CHECK(
- RunInvTxfm(test_temp_block, dst, pitch_));
+ ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ASM_REGISTER_STATE_CHECK(
@@ -358,8 +347,7 @@
const int diff = dst[j] - src[j];
#endif
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
total_error += error;
const int coeff_diff = test_temp_block[j] - ref_temp_block[j];
@@ -370,7 +358,7 @@
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has"
<< "an individual roundtrip error > 1";
- EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error)
+ EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error)
<< "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average"
<< " roundtrip error > 1/5 per block";
@@ -418,8 +406,8 @@
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
- pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
@@ -432,8 +420,7 @@
#endif
const uint32_t error = diff * diff;
EXPECT_GE(1u << 2 * (bit_depth_ - 8), error)
- << "Error: 8x8 IDCT has error " << error
- << " at index " << j;
+ << "Error: 8x8 IDCT has error " << error << " at index " << j;
}
}
}
@@ -461,13 +448,12 @@
const int32_t diff = coeff[j] - coeff_r[j];
const uint32_t error = diff * diff;
EXPECT_GE(9u << 2 * (bit_depth_ - 8), error)
- << "Error: 8x8 DCT has error " << error
- << " at index " << j;
+ << "Error: 8x8 DCT has error " << error << " at index " << j;
}
}
}
-void CompareInvReference(IdctFunc ref_txfm, int thresh) {
+ void CompareInvReference(IdctFunc ref_txfm, int thresh) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 10000;
const int eob = 12;
@@ -484,7 +470,7 @@
for (int j = 0; j < kNumCoeffs; ++j) {
if (j < eob) {
// Random values less than the threshold, either positive or negative
- coeff[scan[j]] = rnd(thresh) * (1-2*(i%2));
+ coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
} else {
coeff[scan[j]] = 0;
}
@@ -504,8 +490,8 @@
#if CONFIG_VP9_HIGHBITDEPTH
} else {
ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
- pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
@@ -517,9 +503,8 @@
const int diff = dst[j] - ref[j];
#endif
const uint32_t error = diff * diff;
- EXPECT_EQ(0u, error)
- << "Error: 8x8 IDCT has error " << error
- << " at index " << j;
+ EXPECT_EQ(0u, error) << "Error: 8x8 IDCT has error " << error
+ << " at index " << j;
}
}
}
@@ -530,17 +515,16 @@
int mask_;
};
-class FwdTrans8x8DCT
- : public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<Dct8x8Param> {
+class FwdTrans8x8DCT : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Dct8x8Param> {
public:
virtual ~FwdTrans8x8DCT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 8;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 8;
fwd_txfm_ref = fdct8x8_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -560,37 +544,26 @@
IdctFunc inv_txfm_;
};
-TEST_P(FwdTrans8x8DCT, SignBiasCheck) {
- RunSignBiasCheck();
-}
+TEST_P(FwdTrans8x8DCT, SignBiasCheck) { RunSignBiasCheck(); }
-TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) {
- RunRoundTripErrorCheck();
-}
+TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); }
-TEST_P(FwdTrans8x8DCT, ExtremalCheck) {
- RunExtremalCheck();
-}
+TEST_P(FwdTrans8x8DCT, ExtremalCheck) { RunExtremalCheck(); }
-TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) {
- RunFwdAccuracyCheck();
-}
+TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) { RunFwdAccuracyCheck(); }
-TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) {
- RunInvAccuracyCheck();
-}
+TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); }
-class FwdTrans8x8HT
- : public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<Ht8x8Param> {
+class FwdTrans8x8HT : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~FwdTrans8x8HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 8;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -610,21 +583,14 @@
IhtFunc inv_txfm_;
};
-TEST_P(FwdTrans8x8HT, SignBiasCheck) {
- RunSignBiasCheck();
-}
+TEST_P(FwdTrans8x8HT, SignBiasCheck) { RunSignBiasCheck(); }
-TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) {
- RunRoundTripErrorCheck();
-}
+TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); }
-TEST_P(FwdTrans8x8HT, ExtremalCheck) {
- RunExtremalCheck();
-}
+TEST_P(FwdTrans8x8HT, ExtremalCheck) { RunExtremalCheck(); }
-class InvTrans8x8DCT
- : public FwdTrans8x8TestBase,
- public ::testing::TestWithParam<Idct8x8Param> {
+class InvTrans8x8DCT : public FwdTrans8x8TestBase,
+ public ::testing::TestWithParam<Idct8x8Param> {
public:
virtual ~InvTrans8x8DCT() {}
@@ -664,10 +630,10 @@
make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10),
make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12)));
#else
-INSTANTIATE_TEST_CASE_P(
- C, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
+ ::testing::Values(make_tuple(&vpx_fdct8x8_c,
+ &vpx_idct8x8_64_add_c, 0,
+ VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if CONFIG_VP9_HIGHBITDEPTH
@@ -697,11 +663,10 @@
#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- NEON, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0,
- VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
+ ::testing::Values(make_tuple(&vpx_fdct8x8_neon,
+ &vpx_idct8x8_64_add_neon,
+ 0, VPX_BITS_8)));
#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
@@ -715,37 +680,33 @@
#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- SSE2, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0,
- VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
+ ::testing::Values(make_tuple(&vpx_fdct8x8_sse2,
+ &vpx_idct8x8_64_add_sse2,
+ 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
::testing::Values(
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2,
- 0, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2,
- 1, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2,
- 2, VPX_BITS_8),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2,
- 3, VPX_BITS_8)));
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
+ VPX_BITS_8)));
#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8),
- make_tuple(&vpx_highbd_fdct8x8_c,
- &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct8x8_sse2,
- &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
- make_tuple(&vpx_highbd_fdct8x8_c,
- &idct8x8_64_add_12_sse2, 12, VPX_BITS_12),
- make_tuple(&vpx_highbd_fdct8x8_sse2,
- &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
+ ::testing::Values(make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0,
+ VPX_BITS_8),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_10_sse2,
+ 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_10_sse2, 12, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_12_sse2,
+ 12, VPX_BITS_12),
+ make_tuple(&vpx_highbd_fdct8x8_sse2,
+ &idct8x8_64_add_12_sse2, 12, VPX_BITS_12)));
INSTANTIATE_TEST_CASE_P(
SSE2, FwdTrans8x8HT,
@@ -760,30 +721,27 @@
INSTANTIATE_TEST_CASE_P(
SSE2, InvTrans8x8DCT,
::testing::Values(
- make_tuple(&idct8x8_10_add_10_c,
- &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10),
- make_tuple(&idct8x8_10,
- &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
- make_tuple(&idct8x8_10_add_12_c,
- &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12),
- make_tuple(&idct8x8_12,
- &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
+ make_tuple(&idct8x8_10_add_10_c, &idct8x8_10_add_10_sse2, 6225,
+ VPX_BITS_10),
+ make_tuple(&idct8x8_10, &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10),
+ make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225,
+ VPX_BITS_12),
+ make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-#if HAVE_SSSE3 && ARCH_X86_64 && \
- !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- SSSE3, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0,
- VPX_BITS_8)));
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
+ ::testing::Values(make_tuple(&vpx_fdct8x8_ssse3,
+ &vpx_idct8x8_64_add_ssse3,
+ 0, VPX_BITS_8)));
#endif
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-INSTANTIATE_TEST_CASE_P(
- MSA, FwdTrans8x8DCT,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
+ ::testing::Values(make_tuple(&vpx_fdct8x8_msa,
+ &vpx_idct8x8_64_add_msa, 0,
+ VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
MSA, FwdTrans8x8HT,
::testing::Values(
diff --git a/test/frame_size_tests.cc b/test/frame_size_tests.cc
index d630e05..cc72d8e 100644
--- a/test/frame_size_tests.cc
+++ b/test/frame_size_tests.cc
@@ -13,12 +13,11 @@
namespace {
-class VP9FrameSizeTestsLarge
- : public ::libvpx_test::EncoderTest,
- public ::testing::Test {
+class VP9FrameSizeTestsLarge : public ::libvpx_test::EncoderTest,
+ public ::testing::Test {
protected:
- VP9FrameSizeTestsLarge() : EncoderTest(&::libvpx_test::kVP10),
- expected_res_(VPX_CODEC_OK) {}
+ VP9FrameSizeTestsLarge()
+ : EncoderTest(&::libvpx_test::kVP10), expected_res_(VPX_CODEC_OK) {}
virtual ~VP9FrameSizeTestsLarge() {}
virtual void SetUp() {
@@ -27,7 +26,7 @@
}
virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
- const libvpx_test::VideoSource& /*video*/,
+ const libvpx_test::VideoSource & /*video*/,
libvpx_test::Decoder *decoder) {
EXPECT_EQ(expected_res_, res_dec) << decoder->DecodeError();
return !::testing::Test::HasFailure();
@@ -67,13 +66,13 @@
expected_res_ = VPX_CODEC_OK;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
#else
- // This test produces a pretty large single frame allocation, (roughly
- // 25 megabits). The encoder allocates a good number of these frames
- // one for each lag in frames (for 2 pass), and then one for each possible
- // reference buffer (8) - we can end up with up to 30 buffers of roughly this
- // size or almost 1 gig of memory.
- // In total the allocations will exceed 2GiB which may cause a failure with
- // mingw + wine, use a smaller size in that case.
+// This test produces a pretty large single frame allocation, (roughly
+// 25 megabits). The encoder allocates a good number of these frames
+// one for each lag in frames (for 2 pass), and then one for each possible
+// reference buffer (8) - we can end up with up to 30 buffers of roughly this
+// size or almost 1 gig of memory.
+// In total the allocations will exceed 2GiB which may cause a failure with
+// mingw + wine, use a smaller size in that case.
#if defined(_WIN32) && !defined(_WIN64) || defined(__OS2__)
video.SetSize(4096, 3072);
#else
diff --git a/test/function_equivalence_test.h b/test/function_equivalence_test.h
index 70f33d1..4c5a97f 100644
--- a/test/function_equivalence_test.h
+++ b/test/function_equivalence_test.h
@@ -48,18 +48,14 @@
virtual ~FunctionEquivalenceTest() {}
- virtual void SetUp() {
- params_ = this->GetParam();
- }
+ virtual void SetUp() { params_ = this->GetParam(); }
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
ACMRandom rng_;
FuncParam<T> params_;
};
-} // namespace libvpx_test
+} // namespace libvpx_test
#endif // TEST_FUNCTION_EQUIVALENCE_TEST_H_
diff --git a/test/hadamard_test.cc b/test/hadamard_test.cc
index b8eec52..e771595 100644
--- a/test/hadamard_test.cc
+++ b/test/hadamard_test.cc
@@ -80,8 +80,8 @@
const int16_t b3 = (a2 - a3) >> 1;
/* Store a 16 bit value. */
- b[ 0] = b0 + b2;
- b[ 64] = b1 + b3;
+ b[0] = b0 + b2;
+ b[64] = b1 + b3;
b[128] = b0 - b2;
b[192] = b1 - b3;
diff --git a/test/hbd_metrics_test.cc b/test/hbd_metrics_test.cc
index 14a8815..f8c0517 100644
--- a/test/hbd_metrics_test.cc
+++ b/test/hbd_metrics_test.cc
@@ -22,7 +22,6 @@
#include "vpx_ports/msvc.h"
#include "vpx_scale/yv12config.h"
-
using libvpx_test::ACMRandom;
namespace {
@@ -30,70 +29,65 @@
typedef double (*LBDMetricFunc)(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest);
typedef double (*HBDMetricFunc)(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest,
- uint32_t in_bd, uint32_t bd);
+ const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
+ uint32_t bd);
double compute_hbd_psnr(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest,
- uint32_t in_bd, uint32_t bd) {
+ const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
+ uint32_t bd) {
PSNR_STATS psnr;
vpx_calc_highbd_psnr(source, dest, &psnr, bd, in_bd);
return psnr.psnr[0];
}
double compute_psnr(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest) {
+ const YV12_BUFFER_CONFIG *dest) {
PSNR_STATS psnr;
vpx_calc_psnr(source, dest, &psnr);
return psnr.psnr[0];
}
double compute_hbd_psnrhvs(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest,
- uint32_t in_bd, uint32_t bd) {
+ const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
+ uint32_t bd) {
double tempy, tempu, tempv;
- return vpx_psnrhvs(source, dest,
- &tempy, &tempu, &tempv, bd, in_bd);
+ return vpx_psnrhvs(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
}
double compute_psnrhvs(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest) {
+ const YV12_BUFFER_CONFIG *dest) {
double tempy, tempu, tempv;
- return vpx_psnrhvs(source, dest,
- &tempy, &tempu, &tempv, 8, 8);
+ return vpx_psnrhvs(source, dest, &tempy, &tempu, &tempv, 8, 8);
}
double compute_hbd_fastssim(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest,
- uint32_t in_bd, uint32_t bd) {
+ const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
+ uint32_t bd) {
double tempy, tempu, tempv;
- return vpx_calc_fastssim(source, dest,
- &tempy, &tempu, &tempv, bd, in_bd);
+ return vpx_calc_fastssim(source, dest, &tempy, &tempu, &tempv, bd, in_bd);
}
double compute_fastssim(const YV12_BUFFER_CONFIG *source,
const YV12_BUFFER_CONFIG *dest) {
double tempy, tempu, tempv;
- return vpx_calc_fastssim(source, dest,
- &tempy, &tempu, &tempv, 8, 8);
+ return vpx_calc_fastssim(source, dest, &tempy, &tempu, &tempv, 8, 8);
}
double compute_hbd_vpxssim(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest,
- uint32_t in_bd, uint32_t bd) {
+ const YV12_BUFFER_CONFIG *dest, uint32_t in_bd,
+ uint32_t bd) {
double ssim, weight;
ssim = vpx_highbd_calc_ssim(source, dest, &weight, bd, in_bd);
return 100 * pow(ssim / weight, 8.0);
}
double compute_vpxssim(const YV12_BUFFER_CONFIG *source,
- const YV12_BUFFER_CONFIG *dest) {
+ const YV12_BUFFER_CONFIG *dest) {
double ssim, weight;
ssim = vpx_calc_ssim(source, dest, &weight);
return 100 * pow(ssim / weight, 8.0);
}
-
class HBDMetricsTestBase {
public:
virtual ~HBDMetricsTestBase() {}
@@ -126,8 +120,8 @@
// Create some distortion for dst buffer.
dpel = rnd.Rand8();
lbd_dst.buffer_alloc[i] = (uint8_t)dpel;
- ((uint16_t*)(hbd_src.buffer_alloc))[i] = spel << (bit_depth_ - 8);
- ((uint16_t*)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
+ ((uint16_t *)(hbd_src.buffer_alloc))[i] = spel << (bit_depth_ - 8);
+ ((uint16_t *)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
i++;
}
@@ -141,7 +135,7 @@
// Create some small distortion for dst buffer.
dpel = 120 + (rnd.Rand8() >> 4);
lbd_dst.buffer_alloc[i] = (uint8_t)dpel;
- ((uint16_t*)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
+ ((uint16_t *)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
i++;
}
@@ -155,7 +149,7 @@
// Create some small distortion for dst buffer.
dpel = 126 + (rnd.Rand8() >> 6);
lbd_dst.buffer_alloc[i] = (uint8_t)dpel;
- ((uint16_t*)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
+ ((uint16_t *)(hbd_dst.buffer_alloc))[i] = dpel << (bit_depth_ - 8);
i++;
}
@@ -176,11 +170,10 @@
HBDMetricFunc hbd_metric_;
};
-typedef std::tr1::tuple<LBDMetricFunc,
- HBDMetricFunc, int, int, double> MetricTestTParam;
-class HBDMetricsTest
- : public HBDMetricsTestBase,
- public ::testing::TestWithParam<MetricTestTParam> {
+typedef std::tr1::tuple<LBDMetricFunc, HBDMetricFunc, int, int, double>
+ MetricTestTParam;
+class HBDMetricsTest : public HBDMetricsTestBase,
+ public ::testing::TestWithParam<MetricTestTParam> {
public:
virtual void SetUp() {
lbd_metric_ = GET_PARAM(0);
@@ -192,9 +185,7 @@
virtual void TearDown() {}
};
-TEST_P(HBDMetricsTest, RunAccuracyCheck) {
- RunAccuracyCheck();
-}
+TEST_P(HBDMetricsTest, RunAccuracyCheck) { RunAccuracyCheck(); }
// Allow small variation due to floating point operations.
static const double kSsim_thresh = 0.001;
@@ -205,47 +196,41 @@
INSTANTIATE_TEST_CASE_P(
VPXSSIM, HBDMetricsTest,
- ::testing::Values(
- MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim, 8, 10,
- kSsim_thresh),
- MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim, 10, 10,
- kPhvs_thresh),
- MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim, 8, 12,
- kSsim_thresh),
- MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim, 12, 12,
- kPhvs_thresh)));
+ ::testing::Values(MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+ 8, 10, kSsim_thresh),
+ MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+ 10, 10, kPhvs_thresh),
+ MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+ 8, 12, kSsim_thresh),
+ MetricTestTParam(&compute_vpxssim, &compute_hbd_vpxssim,
+ 12, 12, kPhvs_thresh)));
INSTANTIATE_TEST_CASE_P(
FASTSSIM, HBDMetricsTest,
- ::testing::Values(
- MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim, 8, 10,
- kFSsim_thresh),
- MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim, 10, 10,
- kFSsim_thresh),
- MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim, 8, 12,
- kFSsim_thresh),
- MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim, 12, 12,
- kFSsim_thresh)));
+ ::testing::Values(MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim,
+ 8, 10, kFSsim_thresh),
+ MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim,
+ 10, 10, kFSsim_thresh),
+ MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim,
+ 8, 12, kFSsim_thresh),
+ MetricTestTParam(&compute_fastssim, &compute_hbd_fastssim,
+ 12, 12, kFSsim_thresh)));
INSTANTIATE_TEST_CASE_P(
PSNRHVS, HBDMetricsTest,
- ::testing::Values(
- MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs, 8, 10,
- kPhvs_thresh),
- MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs, 10, 10,
- kPhvs_thresh),
- MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs, 8, 12,
- kPhvs_thresh),
- MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs, 12, 12,
- kPhvs_thresh)));
+ ::testing::Values(MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs,
+ 8, 10, kPhvs_thresh),
+ MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs,
+ 10, 10, kPhvs_thresh),
+ MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs,
+ 8, 12, kPhvs_thresh),
+ MetricTestTParam(&compute_psnrhvs, &compute_hbd_psnrhvs,
+ 12, 12, kPhvs_thresh)));
INSTANTIATE_TEST_CASE_P(
PSNR, HBDMetricsTest,
::testing::Values(
- MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 8, 10,
- kPhvs_thresh),
+ MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 8, 10, kPhvs_thresh),
MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 10, 10,
kPhvs_thresh),
- MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 8, 12,
- kPhvs_thresh),
+ MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 8, 12, kPhvs_thresh),
MetricTestTParam(&compute_psnr, &compute_hbd_psnr, 12, 12,
kPhvs_thresh)));
} // namespace
-
diff --git a/test/i420_video_source.h b/test/i420_video_source.h
index 0a18480..4957382 100644
--- a/test/i420_video_source.h
+++ b/test/i420_video_source.h
@@ -21,14 +21,11 @@
// so that we can do actual file encodes.
class I420VideoSource : public YUVVideoSource {
public:
- I420VideoSource(const std::string &file_name,
- unsigned int width, unsigned int height,
- int rate_numerator, int rate_denominator,
+ I420VideoSource(const std::string &file_name, unsigned int width,
+ unsigned int height, int rate_numerator, int rate_denominator,
unsigned int start, int limit)
- : YUVVideoSource(file_name, VPX_IMG_FMT_I420,
- width, height,
- rate_numerator, rate_denominator,
- start, limit) {}
+ : YUVVideoSource(file_name, VPX_IMG_FMT_I420, width, height,
+ rate_numerator, rate_denominator, start, limit) {}
};
} // namespace libvpx_test
diff --git a/test/idct8x8_test.cc b/test/idct8x8_test.cc
index 04487c4..ee75805 100644
--- a/test/idct8x8_test.cc
+++ b/test/idct8x8_test.cc
@@ -29,9 +29,8 @@
for (int k = 0; k < 8; k++) {
output[k] = 0.0;
for (int n = 0; n < 8; n++)
- output[k] += input[n]*cos(kPi*(2*n+1)*k/16.0);
- if (k == 0)
- output[k] = output[k]*kInvSqrt2;
+ output[k] += input[n] * cos(kPi * (2 * n + 1) * k / 16.0);
+ if (k == 0) output[k] = output[k] * kInvSqrt2;
}
}
@@ -39,24 +38,19 @@
// First transform columns
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = input[j*8 + i];
+ for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i];
reference_dct_1d(temp_in, temp_out);
- for (int j = 0; j < 8; ++j)
- output[j*8 + i] = temp_out[j];
+ for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j];
}
// Then transform rows
for (int i = 0; i < 8; ++i) {
double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = output[j + i*8];
+ for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8];
reference_dct_1d(temp_in, temp_out);
- for (int j = 0; j < 8; ++j)
- output[j + i*8] = temp_out[j];
+ for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j];
}
// Scale by some magic number
- for (int i = 0; i < 64; ++i)
- output[i] *= 2;
+ for (int i = 0; i < 64; ++i) output[i] *= 2;
}
TEST(VP9Idct8x8Test, AccuracyCheck) {
@@ -73,8 +67,7 @@
dst[j] = rnd.Rand8();
}
// Initialize a test block with input range [-255, 255].
- for (int j = 0; j < 64; ++j)
- input[j] = src[j] - dst[j];
+ for (int j = 0; j < 64; ++j) input[j] = src[j] - dst[j];
reference_dct_2d(input, output_r);
for (int j = 0; j < 64; ++j)
@@ -83,9 +76,8 @@
for (int j = 0; j < 64; ++j) {
const int diff = dst[j] - src[j];
const int error = diff * diff;
- EXPECT_GE(1, error)
- << "Error: 8x8 FDCT/IDCT has error " << error
- << " at index " << j;
+ EXPECT_GE(1, error) << "Error: 8x8 FDCT/IDCT has error " << error
+ << " at index " << j;
}
}
}
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 4ff1ccb..5d0cb58 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -28,15 +28,14 @@
const int count_test_block = 100000;
-typedef void (*IntraPred)(uint16_t* dst, ptrdiff_t stride,
- const uint16_t* above, const uint16_t* left,
- int bps);
+typedef void (*IntraPred)(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above, const uint16_t *left, int bps);
struct IntraPredFunc {
IntraPredFunc(IntraPred pred = NULL, IntraPred ref = NULL,
int block_size_value = 0, int bit_depth_value = 0)
- : pred_fn(pred), ref_fn(ref),
- block_size(block_size_value), bit_depth(bit_depth_value) {}
+ : pred_fn(pred), ref_fn(ref), block_size(block_size_value),
+ bit_depth(bit_depth_value) {}
IntraPred pred_fn;
IntraPred ref_fn;
@@ -46,8 +45,8 @@
class VP9IntraPredTest : public ::testing::TestWithParam<IntraPredFunc> {
public:
- void RunTest(uint16_t* left_col, uint16_t* above_data,
- uint16_t* dst, uint16_t* ref_dst) {
+ void RunTest(uint16_t *left_col, uint16_t *above_data, uint16_t *dst,
+ uint16_t *ref_dst) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int block_size = params_.block_size;
above_row_ = above_data + 16;
@@ -81,14 +80,14 @@
virtual void SetUp() {
params_ = GetParam();
stride_ = params_.block_size * 3;
- mask_ = (1 << params_.bit_depth) - 1;
+ mask_ = (1 << params_.bit_depth) - 1;
}
void Predict() {
const int bit_depth = params_.bit_depth;
params_.ref_fn(ref_dst_, stride_, above_row_, left_col_, bit_depth);
- ASM_REGISTER_STATE_CHECK(params_.pred_fn(dst_, stride_,
- above_row_, left_col_, bit_depth));
+ ASM_REGISTER_STATE_CHECK(
+ params_.pred_fn(dst_, stride_, above_row_, left_col_, bit_depth));
}
void CheckPrediction(int test_case_number, int *error_count) const {
@@ -99,7 +98,7 @@
*error_count += ref_dst_[x + y * stride_] != dst_[x + y * stride_];
if (*error_count == 1) {
ASSERT_EQ(ref_dst_[x + y * stride_], dst_[x + y * stride_])
- << " Failed on Test Case Number "<< test_case_number;
+ << " Failed on Test Case Number " << test_case_number;
}
}
}
@@ -117,8 +116,8 @@
TEST_P(VP9IntraPredTest, IntraPredTests) {
// max block size is 32
- DECLARE_ALIGNED(16, uint16_t, left_col[2*32]);
- DECLARE_ALIGNED(16, uint16_t, above_data[2*32+32]);
+ DECLARE_ALIGNED(16, uint16_t, left_col[2 * 32]);
+ DECLARE_ALIGNED(16, uint16_t, above_data[2 * 32 + 32]);
DECLARE_ALIGNED(16, uint16_t, dst[3 * 32 * 32]);
DECLARE_ALIGNED(16, uint16_t, ref_dst[3 * 32 * 32]);
RunTest(left_col, above_data, dst, ref_dst);
@@ -126,86 +125,86 @@
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_8, VP9IntraPredTest,
- ::testing::Values(
- IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
- &vpx_highbd_dc_predictor_32x32_c, 32, 8),
- IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
- &vpx_highbd_tm_predictor_16x16_c, 16, 8),
- IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
- &vpx_highbd_tm_predictor_32x32_c, 32, 8),
- IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
- &vpx_highbd_dc_predictor_4x4_c, 4, 8),
- IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
- &vpx_highbd_dc_predictor_8x8_c, 8, 8),
- IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
- &vpx_highbd_dc_predictor_16x16_c, 16, 8),
- IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
- &vpx_highbd_v_predictor_4x4_c, 4, 8),
- IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
- &vpx_highbd_v_predictor_8x8_c, 8, 8),
- IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
- &vpx_highbd_v_predictor_16x16_c, 16, 8),
- IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
- &vpx_highbd_v_predictor_32x32_c, 32, 8),
- IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
- &vpx_highbd_tm_predictor_4x4_c, 4, 8),
- IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
- &vpx_highbd_tm_predictor_8x8_c, 8, 8)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2_TO_C_8, VP9IntraPredTest,
+ ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
+ &vpx_highbd_dc_predictor_32x32_c, 32, 8),
+ IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
+ &vpx_highbd_tm_predictor_16x16_c, 16, 8),
+ IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
+ &vpx_highbd_tm_predictor_32x32_c, 32, 8),
+ IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
+ &vpx_highbd_dc_predictor_4x4_c, 4, 8),
+ IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
+ &vpx_highbd_dc_predictor_8x8_c, 8, 8),
+ IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
+ &vpx_highbd_dc_predictor_16x16_c, 16, 8),
+ IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
+ &vpx_highbd_v_predictor_4x4_c, 4, 8),
+ IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
+ &vpx_highbd_v_predictor_8x8_c, 8, 8),
+ IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
+ &vpx_highbd_v_predictor_16x16_c, 16, 8),
+ IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
+ &vpx_highbd_v_predictor_32x32_c, 32, 8),
+ IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
+ &vpx_highbd_tm_predictor_4x4_c, 4, 8),
+ IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
+ &vpx_highbd_tm_predictor_8x8_c, 8, 8)));
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_10, VP9IntraPredTest,
- ::testing::Values(
- IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
- &vpx_highbd_dc_predictor_32x32_c, 32, 10),
- IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
- &vpx_highbd_tm_predictor_16x16_c, 16, 10),
- IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
- &vpx_highbd_tm_predictor_32x32_c, 32, 10),
- IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
- &vpx_highbd_dc_predictor_4x4_c, 4, 10),
- IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
- &vpx_highbd_dc_predictor_8x8_c, 8, 10),
- IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
- &vpx_highbd_dc_predictor_16x16_c, 16, 10),
- IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
- &vpx_highbd_v_predictor_4x4_c, 4, 10),
- IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
- &vpx_highbd_v_predictor_8x8_c, 8, 10),
- IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
- &vpx_highbd_v_predictor_16x16_c, 16, 10),
- IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
- &vpx_highbd_v_predictor_32x32_c, 32, 10),
- IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
- &vpx_highbd_tm_predictor_4x4_c, 4, 10),
- IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
- &vpx_highbd_tm_predictor_8x8_c, 8, 10)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2_TO_C_10, VP9IntraPredTest,
+ ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
+ &vpx_highbd_dc_predictor_32x32_c, 32, 10),
+ IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
+ &vpx_highbd_tm_predictor_16x16_c, 16, 10),
+ IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
+ &vpx_highbd_tm_predictor_32x32_c, 32, 10),
+ IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
+ &vpx_highbd_dc_predictor_4x4_c, 4, 10),
+ IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
+ &vpx_highbd_dc_predictor_8x8_c, 8, 10),
+ IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
+ &vpx_highbd_dc_predictor_16x16_c, 16, 10),
+ IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
+ &vpx_highbd_v_predictor_4x4_c, 4, 10),
+ IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
+ &vpx_highbd_v_predictor_8x8_c, 8, 10),
+ IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
+ &vpx_highbd_v_predictor_16x16_c, 16, 10),
+ IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
+ &vpx_highbd_v_predictor_32x32_c, 32, 10),
+ IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
+ &vpx_highbd_tm_predictor_4x4_c, 4, 10),
+ IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
+ &vpx_highbd_tm_predictor_8x8_c, 8, 10)));
-INSTANTIATE_TEST_CASE_P(SSE2_TO_C_12, VP9IntraPredTest,
- ::testing::Values(
- IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
- &vpx_highbd_dc_predictor_32x32_c, 32, 12),
- IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
- &vpx_highbd_tm_predictor_16x16_c, 16, 12),
- IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
- &vpx_highbd_tm_predictor_32x32_c, 32, 12),
- IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
- &vpx_highbd_dc_predictor_4x4_c, 4, 12),
- IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
- &vpx_highbd_dc_predictor_8x8_c, 8, 12),
- IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
- &vpx_highbd_dc_predictor_16x16_c, 16, 12),
- IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
- &vpx_highbd_v_predictor_4x4_c, 4, 12),
- IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
- &vpx_highbd_v_predictor_8x8_c, 8, 12),
- IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
- &vpx_highbd_v_predictor_16x16_c, 16, 12),
- IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
- &vpx_highbd_v_predictor_32x32_c, 32, 12),
- IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
- &vpx_highbd_tm_predictor_4x4_c, 4, 12),
- IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
- &vpx_highbd_tm_predictor_8x8_c, 8, 12)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2_TO_C_12, VP9IntraPredTest,
+ ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2,
+ &vpx_highbd_dc_predictor_32x32_c, 32, 12),
+ IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2,
+ &vpx_highbd_tm_predictor_16x16_c, 16, 12),
+ IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2,
+ &vpx_highbd_tm_predictor_32x32_c, 32, 12),
+ IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2,
+ &vpx_highbd_dc_predictor_4x4_c, 4, 12),
+ IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2,
+ &vpx_highbd_dc_predictor_8x8_c, 8, 12),
+ IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2,
+ &vpx_highbd_dc_predictor_16x16_c, 16, 12),
+ IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2,
+ &vpx_highbd_v_predictor_4x4_c, 4, 12),
+ IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2,
+ &vpx_highbd_v_predictor_8x8_c, 8, 12),
+ IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2,
+ &vpx_highbd_v_predictor_16x16_c, 16, 12),
+ IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2,
+ &vpx_highbd_v_predictor_32x32_c, 32, 12),
+ IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2,
+ &vpx_highbd_tm_predictor_4x4_c, 4, 12),
+ IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2,
+ &vpx_highbd_tm_predictor_8x8_c, 8, 12)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSE2
diff --git a/test/ivf_video_source.h b/test/ivf_video_source.h
index 824a39d..b87624a 100644
--- a/test/ivf_video_source.h
+++ b/test/ivf_video_source.h
@@ -29,19 +29,13 @@
class IVFVideoSource : public CompressedVideoSource {
public:
explicit IVFVideoSource(const std::string &file_name)
- : file_name_(file_name),
- input_file_(NULL),
- compressed_frame_buf_(NULL),
- frame_sz_(0),
- frame_(0),
- end_of_file_(false) {
- }
+ : file_name_(file_name), input_file_(NULL), compressed_frame_buf_(NULL),
+ frame_sz_(0), frame_(0), end_of_file_(false) {}
virtual ~IVFVideoSource() {
delete[] compressed_frame_buf_;
- if (input_file_)
- fclose(input_file_);
+ if (input_file_) fclose(input_file_);
}
virtual void Init() {
@@ -54,15 +48,16 @@
virtual void Begin() {
input_file_ = OpenTestDataFile(file_name_);
ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
- << file_name_;
+ << file_name_;
// Read file header
uint8_t file_hdr[kIvfFileHdrSize];
ASSERT_EQ(kIvfFileHdrSize, fread(file_hdr, 1, kIvfFileHdrSize, input_file_))
<< "File header read failed.";
// Check file header
- ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' && file_hdr[2] == 'I'
- && file_hdr[3] == 'F') << "Input is not an IVF file.";
+ ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' &&
+ file_hdr[2] == 'I' && file_hdr[3] == 'F')
+ << "Input is not an IVF file.";
FillFrame();
}
@@ -76,8 +71,8 @@
ASSERT_TRUE(input_file_ != NULL);
uint8_t frame_hdr[kIvfFrameHdrSize];
// Check frame header and read a frame from input_file.
- if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_)
- != kIvfFrameHdrSize) {
+ if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_) !=
+ kIvfFrameHdrSize) {
end_of_file_ = true;
} else {
end_of_file_ = false;
diff --git a/test/level_test.cc b/test/level_test.cc
index 62d0247..fbbb539 100644
--- a/test/level_test.cc
+++ b/test/level_test.cc
@@ -19,12 +19,9 @@
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
LevelTest()
- : EncoderTest(GET_PARAM(0)),
- encoding_mode_(GET_PARAM(1)),
- cpu_used_(GET_PARAM(2)),
- min_gf_internal_(24),
- target_level_(0),
- level_(0) {}
+ : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
+ cpu_used_(GET_PARAM(2)), min_gf_internal_(24), target_level_(0),
+ level_(0) {}
virtual ~LevelTest() {}
virtual void SetUp() {
diff --git a/test/lossless_test.cc b/test/lossless_test.cc
index 1584979..c7f1963 100644
--- a/test/lossless_test.cc
+++ b/test/lossless_test.cc
@@ -21,15 +21,13 @@
const int kMaxPsnr = 100;
-class LosslessTestLarge : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class LosslessTestLarge
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
LosslessTestLarge()
- : EncoderTest(GET_PARAM(0)),
- psnr_(kMaxPsnr),
- nframes_(0),
- encoding_mode_(GET_PARAM(1)) {
- }
+ : EncoderTest(GET_PARAM(0)), psnr_(kMaxPsnr), nframes_(0),
+ encoding_mode_(GET_PARAM(1)) {}
virtual ~LosslessTestLarge() {}
@@ -55,13 +53,10 @@
}
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->data.psnr.psnr[0] < psnr_)
- psnr_= pkt->data.psnr.psnr[0];
+ if (pkt->data.psnr.psnr[0] < psnr_) psnr_ = pkt->data.psnr.psnr[0];
}
- double GetMinPsnr() const {
- return psnr_;
- }
+ double GetMinPsnr() const { return psnr_; }
private:
double psnr_;
@@ -123,7 +118,6 @@
EXPECT_GE(psnr_lossless, kMaxPsnr);
}
-
VP10_INSTANTIATE_TEST_CASE(LosslessTestLarge,
::testing::Values(::libvpx_test::kOnePassGood,
::libvpx_test::kTwoPassGood));
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index b14352c..8aaab23 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -36,8 +36,7 @@
#if CONFIG_VP9_HIGHBITDEPTH
typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
- const uint8_t *limit, const uint8_t *thresh,
- int bd);
+ const uint8_t *limit, const uint8_t *thresh, int bd);
typedef void (*dual_loop_op_t)(uint16_t *s, int p, const uint8_t *blimit0,
const uint8_t *limit0, const uint8_t *thresh0,
const uint8_t *blimit1, const uint8_t *limit1,
@@ -108,21 +107,18 @@
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
- int32_t p = kNumCoeffs/32;
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+ int32_t p = kNumCoeffs / 32;
uint16_t tmp_s[kNumCoeffs];
int j = 0;
@@ -158,7 +154,7 @@
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
#else
- ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh);
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -206,20 +202,17 @@
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
int32_t p = kNumCoeffs / 32;
for (int j = 0; j < kNumCoeffs; ++j) {
s[j] = rnd.Rand16() & mask_;
@@ -230,7 +223,7 @@
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
#else
- ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh);
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
ASM_REGISTER_STATE_CHECK(
loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -256,43 +249,37 @@
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
- DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
- DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
+ DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+ DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
int32_t p = kNumCoeffs / 32;
uint16_t tmp_s[kNumCoeffs];
int j = 0;
@@ -325,17 +312,16 @@
ref_s[j] = s[j];
}
#if CONFIG_VP9_HIGHBITDEPTH
- ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1, bd);
- ASM_REGISTER_STATE_CHECK(
- loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1, bd));
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+ limit1, thresh1, bd);
+ ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+ thresh0, blimit1, limit1, thresh1,
+ bd));
#else
- ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1);
- ASM_REGISTER_STATE_CHECK(
- loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1));
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+ limit1, thresh1);
+ ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+ thresh0, blimit1, limit1, thresh1));
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
@@ -358,43 +344,37 @@
DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
#else
- DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
- DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
+ DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+ DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
int err_count_total = 0;
int first_failure = -1;
for (int i = 0; i < count_test_block; ++i) {
int err_count = 0;
uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
- DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
- DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
tmp = rnd.Rand8();
- DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
- tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
- };
+ DECLARE_ALIGNED(16, const uint8_t,
+ thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+ tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
int32_t p = kNumCoeffs / 32; // TODO(pdlf) can we have non-square here?
for (int j = 0; j < kNumCoeffs; ++j) {
s[j] = rnd.Rand16() & mask_;
@@ -402,17 +382,16 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
const int32_t bd = bit_depth_;
- ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1, bd);
- ASM_REGISTER_STATE_CHECK(
- loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
- thresh0, blimit1, limit1, thresh1, bd));
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+ limit1, thresh1, bd);
+ ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+ thresh0, blimit1, limit1, thresh1,
+ bd));
#else
- ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1);
- ASM_REGISTER_STATE_CHECK(
- loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
- blimit1, limit1, thresh1));
+ ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+ limit1, thresh1);
+ ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+ thresh0, blimit1, limit1, thresh1));
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int j = 0; j < kNumCoeffs; ++j) {
err_count += ref_s[j] != s[j];
@@ -434,129 +413,120 @@
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test6Param,
- ::testing::Values(
- make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
- &vpx_highbd_lpf_horizontal_4_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
- &vpx_highbd_lpf_vertical_4_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
- &vpx_highbd_lpf_horizontal_8_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
- &vpx_highbd_lpf_horizontal_edge_8_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
- &vpx_highbd_lpf_horizontal_edge_16_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
- &vpx_highbd_lpf_vertical_8_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
- &vpx_highbd_lpf_vertical_16_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
- &vpx_highbd_lpf_horizontal_4_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
- &vpx_highbd_lpf_vertical_4_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
- &vpx_highbd_lpf_horizontal_8_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
- &vpx_highbd_lpf_horizontal_edge_8_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
- &vpx_highbd_lpf_horizontal_edge_16_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
- &vpx_highbd_lpf_vertical_8_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
- &vpx_highbd_lpf_vertical_16_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
- &vpx_highbd_lpf_horizontal_4_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
- &vpx_highbd_lpf_vertical_4_c, 12),
- make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
- &vpx_highbd_lpf_horizontal_8_c, 12),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
- &vpx_highbd_lpf_horizontal_edge_8_c, 12),
- make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
- &vpx_highbd_lpf_horizontal_edge_16_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
- &vpx_highbd_lpf_vertical_8_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
- &vpx_highbd_lpf_vertical_16_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
- &vpx_highbd_lpf_vertical_16_dual_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
- &vpx_highbd_lpf_vertical_16_dual_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
- &vpx_highbd_lpf_vertical_16_dual_c, 12)));
+ ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+ &vpx_highbd_lpf_horizontal_4_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+ &vpx_highbd_lpf_vertical_4_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+ &vpx_highbd_lpf_horizontal_8_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+ &vpx_highbd_lpf_horizontal_edge_8_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+ &vpx_highbd_lpf_horizontal_edge_16_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+ &vpx_highbd_lpf_vertical_8_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+ &vpx_highbd_lpf_vertical_16_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+ &vpx_highbd_lpf_horizontal_4_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+ &vpx_highbd_lpf_vertical_4_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+ &vpx_highbd_lpf_horizontal_8_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+ &vpx_highbd_lpf_horizontal_edge_8_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+ &vpx_highbd_lpf_horizontal_edge_16_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+ &vpx_highbd_lpf_vertical_8_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+ &vpx_highbd_lpf_vertical_16_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+ &vpx_highbd_lpf_horizontal_4_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+ &vpx_highbd_lpf_vertical_4_c, 12),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+ &vpx_highbd_lpf_horizontal_8_c, 12),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+ &vpx_highbd_lpf_horizontal_edge_8_c, 12),
+ make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+ &vpx_highbd_lpf_horizontal_edge_16_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+ &vpx_highbd_lpf_vertical_8_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+ &vpx_highbd_lpf_vertical_16_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+ &vpx_highbd_lpf_vertical_16_dual_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+ &vpx_highbd_lpf_vertical_16_dual_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+ &vpx_highbd_lpf_vertical_16_dual_c, 12)));
#else
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test6Param,
::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_sse2,
- &vpx_lpf_horizontal_4_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_sse2,
- &vpx_lpf_horizontal_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_4_sse2, &vpx_lpf_horizontal_4_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_sse2, &vpx_lpf_horizontal_8_c, 8),
make_tuple(&vpx_lpf_horizontal_edge_8_sse2,
&vpx_lpf_horizontal_edge_8_c, 8),
make_tuple(&vpx_lpf_horizontal_edge_16_sse2,
&vpx_lpf_horizontal_edge_16_c, 8),
- make_tuple(&vpx_lpf_vertical_4_sse2,
- &vpx_lpf_vertical_4_c, 8),
- make_tuple(&vpx_lpf_vertical_8_sse2,
- &vpx_lpf_vertical_8_c, 8),
- make_tuple(&vpx_lpf_vertical_16_sse2,
- &vpx_lpf_vertical_16_c, 8),
- make_tuple(&vpx_lpf_vertical_16_dual_sse2,
- &vpx_lpf_vertical_16_dual_c, 8)));
+ make_tuple(&vpx_lpf_vertical_4_sse2, &vpx_lpf_vertical_4_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_sse2, &vpx_lpf_vertical_16_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_dual_sse2, &vpx_lpf_vertical_16_dual_c,
+ 8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
#if HAVE_AVX2 && (!CONFIG_VP9_HIGHBITDEPTH)
INSTANTIATE_TEST_CASE_P(
AVX2, Loop8Test6Param,
- ::testing::Values(
- make_tuple(&vpx_lpf_horizontal_edge_8_avx2,
- &vpx_lpf_horizontal_edge_8_c, 8),
- make_tuple(&vpx_lpf_horizontal_edge_16_avx2,
- &vpx_lpf_horizontal_edge_16_c, 8)));
+ ::testing::Values(make_tuple(&vpx_lpf_horizontal_edge_8_avx2,
+ &vpx_lpf_horizontal_edge_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_edge_16_avx2,
+ &vpx_lpf_horizontal_edge_16_c, 8)));
#endif
#if HAVE_SSE2
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test9Param,
- ::testing::Values(
- make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
- &vpx_highbd_lpf_horizontal_4_dual_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
- &vpx_highbd_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
- &vpx_highbd_lpf_vertical_4_dual_c, 8),
- make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
- &vpx_highbd_lpf_vertical_8_dual_c, 8),
- make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
- &vpx_highbd_lpf_horizontal_4_dual_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
- &vpx_highbd_lpf_horizontal_8_dual_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
- &vpx_highbd_lpf_vertical_4_dual_c, 10),
- make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
- &vpx_highbd_lpf_vertical_8_dual_c, 10),
- make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
- &vpx_highbd_lpf_horizontal_4_dual_c, 12),
- make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
- &vpx_highbd_lpf_horizontal_8_dual_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
- &vpx_highbd_lpf_vertical_4_dual_c, 12),
- make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
- &vpx_highbd_lpf_vertical_8_dual_c, 12)));
+ ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+ &vpx_highbd_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+ &vpx_highbd_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+ &vpx_highbd_lpf_vertical_4_dual_c, 8),
+ make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+ &vpx_highbd_lpf_vertical_8_dual_c, 8),
+ make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+ &vpx_highbd_lpf_horizontal_4_dual_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+ &vpx_highbd_lpf_horizontal_8_dual_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+ &vpx_highbd_lpf_vertical_4_dual_c, 10),
+ make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+ &vpx_highbd_lpf_vertical_8_dual_c, 10),
+ make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+ &vpx_highbd_lpf_horizontal_4_dual_c, 12),
+ make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+ &vpx_highbd_lpf_horizontal_8_dual_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+ &vpx_highbd_lpf_vertical_4_dual_c, 12),
+ make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+ &vpx_highbd_lpf_vertical_8_dual_c, 12)));
#else
INSTANTIATE_TEST_CASE_P(
SSE2, Loop8Test9Param,
- ::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
- &vpx_lpf_horizontal_4_dual_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
- &vpx_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_4_dual_sse2,
- &vpx_lpf_vertical_4_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_8_dual_sse2,
- &vpx_lpf_vertical_8_dual_c, 8)));
+ ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
+ &vpx_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
+ &vpx_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_4_dual_sse2,
+ &vpx_lpf_vertical_4_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_dual_sse2,
+ &vpx_lpf_vertical_8_dual_c, 8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
@@ -568,38 +538,33 @@
NEON, Loop8Test6Param,
::testing::Values(
#if HAVE_NEON_ASM
-// Using #if inside the macro is unsupported on MSVS but the tests are not
-// currently built for MSVS with ARM and NEON.
+ // Using #if inside the macro is unsupported on MSVS but the tests are
+ // not
+ // currently built for MSVS with ARM and NEON.
make_tuple(&vpx_lpf_horizontal_edge_8_neon,
&vpx_lpf_horizontal_edge_8_c, 8),
make_tuple(&vpx_lpf_horizontal_edge_16_neon,
&vpx_lpf_horizontal_edge_16_c, 8),
- make_tuple(&vpx_lpf_vertical_16_neon,
- &vpx_lpf_vertical_16_c, 8),
- make_tuple(&vpx_lpf_vertical_16_dual_neon,
- &vpx_lpf_vertical_16_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_neon, &vpx_lpf_vertical_16_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_dual_neon, &vpx_lpf_vertical_16_dual_c,
+ 8),
#endif // HAVE_NEON_ASM
- make_tuple(&vpx_lpf_horizontal_8_neon,
- &vpx_lpf_horizontal_8_c, 8),
- make_tuple(&vpx_lpf_vertical_8_neon,
- &vpx_lpf_vertical_8_c, 8),
- make_tuple(&vpx_lpf_horizontal_4_neon,
- &vpx_lpf_horizontal_4_c, 8),
- make_tuple(&vpx_lpf_vertical_4_neon,
- &vpx_lpf_vertical_4_c, 8)));
-INSTANTIATE_TEST_CASE_P(
- NEON, Loop8Test9Param,
- ::testing::Values(
+ make_tuple(&vpx_lpf_horizontal_8_neon, &vpx_lpf_horizontal_8_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_neon, &vpx_lpf_vertical_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_4_neon, &vpx_lpf_horizontal_4_c, 8),
+ make_tuple(&vpx_lpf_vertical_4_neon, &vpx_lpf_vertical_4_c, 8)));
+INSTANTIATE_TEST_CASE_P(NEON, Loop8Test9Param,
+ ::testing::Values(
#if HAVE_NEON_ASM
- make_tuple(&vpx_lpf_horizontal_8_dual_neon,
- &vpx_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_8_dual_neon,
- &vpx_lpf_vertical_8_dual_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_dual_neon,
+ &vpx_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_dual_neon,
+ &vpx_lpf_vertical_8_dual_c, 8),
#endif // HAVE_NEON_ASM
- make_tuple(&vpx_lpf_horizontal_4_dual_neon,
- &vpx_lpf_horizontal_4_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_4_dual_neon,
- &vpx_lpf_vertical_4_dual_c, 8)));
+ make_tuple(&vpx_lpf_horizontal_4_dual_neon,
+ &vpx_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_4_dual_neon,
+ &vpx_lpf_vertical_4_dual_c, 8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_NEON
@@ -607,66 +572,52 @@
INSTANTIATE_TEST_CASE_P(
DSPR2, Loop8Test6Param,
::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_dspr2,
- &vpx_lpf_horizontal_4_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_dspr2,
- &vpx_lpf_horizontal_8_c, 8),
- make_tuple(&vpx_lpf_horizontal_edge_8,
- &vpx_lpf_horizontal_edge_8, 8),
- make_tuple(&vpx_lpf_horizontal_edge_16,
- &vpx_lpf_horizontal_edge_16, 8),
- make_tuple(&vpx_lpf_vertical_4_dspr2,
- &vpx_lpf_vertical_4_c, 8),
- make_tuple(&vpx_lpf_vertical_8_dspr2,
- &vpx_lpf_vertical_8_c, 8),
- make_tuple(&vpx_lpf_vertical_16_dspr2,
- &vpx_lpf_vertical_16_c, 8),
- make_tuple(&vpx_lpf_vertical_16_dual_dspr2,
- &vpx_lpf_vertical_16_dual_c, 8)));
+ make_tuple(&vpx_lpf_horizontal_4_dspr2, &vpx_lpf_horizontal_4_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_dspr2, &vpx_lpf_horizontal_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_edge_8, &vpx_lpf_horizontal_edge_8, 8),
+ make_tuple(&vpx_lpf_horizontal_edge_16, &vpx_lpf_horizontal_edge_16, 8),
+ make_tuple(&vpx_lpf_vertical_4_dspr2, &vpx_lpf_vertical_4_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_dspr2, &vpx_lpf_vertical_8_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_dspr2, &vpx_lpf_vertical_16_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_dual_dspr2, &vpx_lpf_vertical_16_dual_c,
+ 8)));
INSTANTIATE_TEST_CASE_P(
DSPR2, Loop8Test9Param,
- ::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_dual_dspr2,
- &vpx_lpf_horizontal_4_dual_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_dual_dspr2,
- &vpx_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_4_dual_dspr2,
- &vpx_lpf_vertical_4_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_8_dual_dspr2,
- &vpx_lpf_vertical_8_dual_c, 8)));
+ ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_dspr2,
+ &vpx_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_dual_dspr2,
+ &vpx_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_4_dual_dspr2,
+ &vpx_lpf_vertical_4_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_dual_dspr2,
+ &vpx_lpf_vertical_8_dual_c, 8)));
#endif // HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
INSTANTIATE_TEST_CASE_P(
MSA, Loop8Test6Param,
::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_msa,
- &vpx_lpf_horizontal_4_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_msa,
- &vpx_lpf_horizontal_8_c, 8),
- make_tuple(&vpx_lpf_horizontal_edge_8_msa,
- &vpx_lpf_horizontal_edge_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_4_msa, &vpx_lpf_horizontal_4_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_msa, &vpx_lpf_horizontal_8_c, 8),
+ make_tuple(&vpx_lpf_horizontal_edge_8_msa, &vpx_lpf_horizontal_edge_8_c,
+ 8),
make_tuple(&vpx_lpf_horizontal_edge_16_msa,
&vpx_lpf_horizontal_edge_16_c, 8),
- make_tuple(&vpx_lpf_vertical_4_msa,
- &vpx_lpf_vertical_4_c, 8),
- make_tuple(&vpx_lpf_vertical_8_msa,
- &vpx_lpf_vertical_8_c, 8),
- make_tuple(&vpx_lpf_vertical_16_msa,
- &vpx_lpf_vertical_16_c, 8)));
+ make_tuple(&vpx_lpf_vertical_4_msa, &vpx_lpf_vertical_4_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_msa, &vpx_lpf_vertical_8_c, 8),
+ make_tuple(&vpx_lpf_vertical_16_msa, &vpx_lpf_vertical_16_c, 8)));
INSTANTIATE_TEST_CASE_P(
MSA, Loop8Test9Param,
- ::testing::Values(
- make_tuple(&vpx_lpf_horizontal_4_dual_msa,
- &vpx_lpf_horizontal_4_dual_c, 8),
- make_tuple(&vpx_lpf_horizontal_8_dual_msa,
- &vpx_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_4_dual_msa,
- &vpx_lpf_vertical_4_dual_c, 8),
- make_tuple(&vpx_lpf_vertical_8_dual_msa,
- &vpx_lpf_vertical_8_dual_c, 8)));
+ ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_msa,
+ &vpx_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vpx_lpf_horizontal_8_dual_msa,
+ &vpx_lpf_horizontal_8_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_4_dual_msa,
+ &vpx_lpf_vertical_4_dual_c, 8),
+ make_tuple(&vpx_lpf_vertical_8_dual_msa,
+ &vpx_lpf_vertical_8_dual_c, 8)));
#endif // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
} // namespace
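Note on the hunks above: every loop filter case follows the same compare-against-reference pattern — fill aligned pixel buffers with random data, replicate a random bound byte across a 16-byte blimit/limit/thresh vector, run the optimized kernel under ASM_REGISTER_STATE_CHECK and the C reference on identical copies, then count mismatching pixels and remember the first failing iteration. Below is a minimal, self-contained sketch of that pattern; the kernel signature, names, and toy filter body are illustrative only and do not match the libvpx loop filter prototypes.

// Minimal sketch of the compare-against-reference harness used by the
// loop filter tests above (all names and the toy kernels are illustrative).
#include <cstdint>
#include <cstring>
#include <iostream>
#include <random>

typedef void (*FilterFn)(uint8_t *s, int pitch, const uint8_t *thresh);

// Toy "reference" and "optimized" kernels: clamp an 8x8 block to a threshold.
static void filter_c(uint8_t *s, int pitch, const uint8_t *thresh) {
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c)
      if (s[r * pitch + c] > *thresh) s[r * pitch + c] = *thresh;
}
static void filter_opt(uint8_t *s, int pitch, const uint8_t *thresh) {
  filter_c(s, pitch, thresh);  // stand-in for a SIMD implementation
}

int main() {
  std::mt19937 rnd(0xbaba);  // deterministic seed, as in ACMRandom
  const int kPitch = 8, kCoeffs = 64, kTrials = 1000;
  FilterFn ref_op = filter_c, opt_op = filter_opt;
  int err_count = 0, first_failure = -1;
  for (int i = 0; i < kTrials; ++i) {
    alignas(16) uint8_t s[kCoeffs], ref_s[kCoeffs];
    alignas(16) uint8_t thresh[16];
    // Replicate one random byte across the whole 16-byte threshold vector,
    // mirroring the DECLARE_ALIGNED(16, ...) blimit/limit/thresh setup.
    std::memset(thresh, static_cast<int>(rnd() & 0x3f), sizeof(thresh));
    for (int j = 0; j < kCoeffs; ++j) ref_s[j] = s[j] = rnd() & 0xff;
    ref_op(ref_s, kPitch, thresh);  // reference output
    opt_op(s, kPitch, thresh);      // output under test
    for (int j = 0; j < kCoeffs; ++j) err_count += ref_s[j] != s[j];
    if (err_count && first_failure == -1) first_failure = i;
  }
  std::cout << "mismatches: " << err_count
            << " first failure: " << first_failure << "\n";
  return err_count == 0 ? 0 : 1;
}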
diff --git a/test/masked_sad_test.cc b/test/masked_sad_test.cc
index 13fff0f..c5b5081 100644
--- a/test/masked_sad_test.cc
+++ b/test/masked_sad_test.cc
@@ -36,7 +36,7 @@
public:
virtual ~MaskedSADTest() {}
virtual void SetUp() {
- maskedSAD_op_ = GET_PARAM(0);
+ maskedSAD_op_ = GET_PARAM(0);
ref_maskedSAD_op_ = GET_PARAM(1);
}
@@ -50,36 +50,35 @@
TEST_P(MaskedSADTest, OperationCheck) {
unsigned int ref_ret, ret;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
int ref_stride = MAX_SB_SIZE;
int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
- for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
+ for (int j = 0; j < MAX_SB_SIZE * MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
- msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
+ msk_ptr[j] = ((rnd.Rand8() & 0x7f) > 64) ? rnd.Rand8() & 0x3f : 64;
assert(msk_ptr[j] <= 64);
}
ref_ret = ref_maskedSAD_op_(src_ptr, src_stride, ref_ptr, ref_stride,
msk_ptr, msk_stride);
- ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src_ptr, src_stride,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride));
+ ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src_ptr, src_stride, ref_ptr,
+ ref_stride, msk_ptr,
+ msk_stride));
if (ret != ref_ret) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
EXPECT_EQ(0, err_count)
- << "Error: Masked SAD Test, C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ << "Error: Masked SAD Test, C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
#if CONFIG_VP9_HIGHBITDEPTH
@@ -89,12 +88,12 @@
typedef std::tr1::tuple<HighbdMaskedSADFunc, HighbdMaskedSADFunc>
HighbdMaskedSADParam;
-class HighbdMaskedSADTest : public ::testing::
- TestWithParam<HighbdMaskedSADParam> {
+class HighbdMaskedSADTest
+ : public ::testing::TestWithParam<HighbdMaskedSADParam> {
public:
virtual ~HighbdMaskedSADTest() {}
virtual void SetUp() {
- maskedSAD_op_ = GET_PARAM(0);
+ maskedSAD_op_ = GET_PARAM(0);
ref_maskedSAD_op_ = GET_PARAM(1);
}
@@ -108,37 +107,36 @@
TEST_P(HighbdMaskedSADTest, OperationCheck) {
unsigned int ref_ret, ret;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
- uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+ DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+ uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
int ref_stride = MAX_SB_SIZE;
int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
- for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
- src_ptr[j] = rnd.Rand16()&0xfff;
- ref_ptr[j] = rnd.Rand16()&0xfff;
- msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
+ for (int j = 0; j < MAX_SB_SIZE * MAX_SB_SIZE; j++) {
+ src_ptr[j] = rnd.Rand16() & 0xfff;
+ ref_ptr[j] = rnd.Rand16() & 0xfff;
+ msk_ptr[j] = ((rnd.Rand8() & 0x7f) > 64) ? rnd.Rand8() & 0x3f : 64;
}
ref_ret = ref_maskedSAD_op_(src8_ptr, src_stride, ref8_ptr, ref_stride,
msk_ptr, msk_stride);
- ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src8_ptr, src_stride,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride));
+ ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src8_ptr, src_stride, ref8_ptr,
+ ref_stride, msk_ptr,
+ msk_stride));
if (ret != ref_ret) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
EXPECT_EQ(0, err_count)
- << "Error: High BD Masked SAD Test, C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ << "Error: High BD Masked SAD Test, C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -146,80 +144,63 @@
#if HAVE_SSSE3
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, MaskedSADTest,
- ::testing::Values(
+ SSSE3_C_COMPARE, MaskedSADTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_sad128x128_ssse3,
- &vpx_masked_sad128x128_c),
- make_tuple(&vpx_masked_sad128x64_ssse3,
- &vpx_masked_sad128x64_c),
- make_tuple(&vpx_masked_sad64x128_ssse3,
- &vpx_masked_sad64x128_c),
+ make_tuple(&vpx_masked_sad128x128_ssse3, &vpx_masked_sad128x128_c),
+ make_tuple(&vpx_masked_sad128x64_ssse3, &vpx_masked_sad128x64_c),
+ make_tuple(&vpx_masked_sad64x128_ssse3, &vpx_masked_sad64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_sad64x64_ssse3,
- &vpx_masked_sad64x64_c),
- make_tuple(&vpx_masked_sad64x32_ssse3,
- &vpx_masked_sad64x32_c),
- make_tuple(&vpx_masked_sad32x64_ssse3,
- &vpx_masked_sad32x64_c),
- make_tuple(&vpx_masked_sad32x32_ssse3,
- &vpx_masked_sad32x32_c),
- make_tuple(&vpx_masked_sad32x16_ssse3,
- &vpx_masked_sad32x16_c),
- make_tuple(&vpx_masked_sad16x32_ssse3,
- &vpx_masked_sad16x32_c),
- make_tuple(&vpx_masked_sad16x16_ssse3,
- &vpx_masked_sad16x16_c),
- make_tuple(&vpx_masked_sad16x8_ssse3,
- &vpx_masked_sad16x8_c),
- make_tuple(&vpx_masked_sad8x16_ssse3,
- &vpx_masked_sad8x16_c),
- make_tuple(&vpx_masked_sad8x8_ssse3,
- &vpx_masked_sad8x8_c),
- make_tuple(&vpx_masked_sad8x4_ssse3,
- &vpx_masked_sad8x4_c),
- make_tuple(&vpx_masked_sad4x8_ssse3,
- &vpx_masked_sad4x8_c),
- make_tuple(&vpx_masked_sad4x4_ssse3,
- &vpx_masked_sad4x4_c)));
+ make_tuple(&vpx_masked_sad64x64_ssse3, &vpx_masked_sad64x64_c),
+ make_tuple(&vpx_masked_sad64x32_ssse3, &vpx_masked_sad64x32_c),
+ make_tuple(&vpx_masked_sad32x64_ssse3, &vpx_masked_sad32x64_c),
+ make_tuple(&vpx_masked_sad32x32_ssse3, &vpx_masked_sad32x32_c),
+ make_tuple(&vpx_masked_sad32x16_ssse3, &vpx_masked_sad32x16_c),
+ make_tuple(&vpx_masked_sad16x32_ssse3, &vpx_masked_sad16x32_c),
+ make_tuple(&vpx_masked_sad16x16_ssse3, &vpx_masked_sad16x16_c),
+ make_tuple(&vpx_masked_sad16x8_ssse3, &vpx_masked_sad16x8_c),
+ make_tuple(&vpx_masked_sad8x16_ssse3, &vpx_masked_sad8x16_c),
+ make_tuple(&vpx_masked_sad8x8_ssse3, &vpx_masked_sad8x8_c),
+ make_tuple(&vpx_masked_sad8x4_ssse3, &vpx_masked_sad8x4_c),
+ make_tuple(&vpx_masked_sad4x8_ssse3, &vpx_masked_sad4x8_c),
+ make_tuple(&vpx_masked_sad4x4_ssse3, &vpx_masked_sad4x4_c)));
#if CONFIG_VP9_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, HighbdMaskedSADTest,
- ::testing::Values(
+INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSADTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_sad128x128_ssse3,
- &vpx_highbd_masked_sad128x128_c),
- make_tuple(&vpx_highbd_masked_sad128x64_ssse3,
- &vpx_highbd_masked_sad128x64_c),
- make_tuple(&vpx_highbd_masked_sad64x128_ssse3,
- &vpx_highbd_masked_sad64x128_c),
+ make_tuple(&vpx_highbd_masked_sad128x128_ssse3,
+ &vpx_highbd_masked_sad128x128_c),
+ make_tuple(&vpx_highbd_masked_sad128x64_ssse3,
+ &vpx_highbd_masked_sad128x64_c),
+ make_tuple(&vpx_highbd_masked_sad64x128_ssse3,
+ &vpx_highbd_masked_sad64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_sad64x64_ssse3,
- &vpx_highbd_masked_sad64x64_c),
- make_tuple(&vpx_highbd_masked_sad64x32_ssse3,
- &vpx_highbd_masked_sad64x32_c),
- make_tuple(&vpx_highbd_masked_sad32x64_ssse3,
- &vpx_highbd_masked_sad32x64_c),
- make_tuple(&vpx_highbd_masked_sad32x32_ssse3,
- &vpx_highbd_masked_sad32x32_c),
- make_tuple(&vpx_highbd_masked_sad32x16_ssse3,
- &vpx_highbd_masked_sad32x16_c),
- make_tuple(&vpx_highbd_masked_sad16x32_ssse3,
- &vpx_highbd_masked_sad16x32_c),
- make_tuple(&vpx_highbd_masked_sad16x16_ssse3,
- &vpx_highbd_masked_sad16x16_c),
- make_tuple(&vpx_highbd_masked_sad16x8_ssse3,
- &vpx_highbd_masked_sad16x8_c),
- make_tuple(&vpx_highbd_masked_sad8x16_ssse3,
- &vpx_highbd_masked_sad8x16_c),
- make_tuple(&vpx_highbd_masked_sad8x8_ssse3,
- &vpx_highbd_masked_sad8x8_c),
- make_tuple(&vpx_highbd_masked_sad8x4_ssse3,
- &vpx_highbd_masked_sad8x4_c),
- make_tuple(&vpx_highbd_masked_sad4x8_ssse3,
- &vpx_highbd_masked_sad4x8_c),
- make_tuple(&vpx_highbd_masked_sad4x4_ssse3,
- &vpx_highbd_masked_sad4x4_c)));
+ make_tuple(&vpx_highbd_masked_sad64x64_ssse3,
+ &vpx_highbd_masked_sad64x64_c),
+ make_tuple(&vpx_highbd_masked_sad64x32_ssse3,
+ &vpx_highbd_masked_sad64x32_c),
+ make_tuple(&vpx_highbd_masked_sad32x64_ssse3,
+ &vpx_highbd_masked_sad32x64_c),
+ make_tuple(&vpx_highbd_masked_sad32x32_ssse3,
+ &vpx_highbd_masked_sad32x32_c),
+ make_tuple(&vpx_highbd_masked_sad32x16_ssse3,
+ &vpx_highbd_masked_sad32x16_c),
+ make_tuple(&vpx_highbd_masked_sad16x32_ssse3,
+ &vpx_highbd_masked_sad16x32_c),
+ make_tuple(&vpx_highbd_masked_sad16x16_ssse3,
+ &vpx_highbd_masked_sad16x16_c),
+ make_tuple(&vpx_highbd_masked_sad16x8_ssse3,
+ &vpx_highbd_masked_sad16x8_c),
+ make_tuple(&vpx_highbd_masked_sad8x16_ssse3,
+ &vpx_highbd_masked_sad8x16_c),
+ make_tuple(&vpx_highbd_masked_sad8x8_ssse3,
+ &vpx_highbd_masked_sad8x8_c),
+ make_tuple(&vpx_highbd_masked_sad8x4_ssse3,
+ &vpx_highbd_masked_sad8x4_c),
+ make_tuple(&vpx_highbd_masked_sad4x8_ssse3,
+ &vpx_highbd_masked_sad4x8_c),
+ make_tuple(&vpx_highbd_masked_sad4x4_ssse3,
+ &vpx_highbd_masked_sad4x4_c)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSSE3
} // namespace
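The masked SAD tests above constrain every mask sample to the range [0, 64] (see the assert(msk_ptr[j] <= 64) fill), which reads as a per-pixel weight on a 6-bit scale. As a rough illustration only — the exact accumulation and rounding used by vpx_masked_sad*_c is not shown in this diff and may differ — a scalar weighted-SAD reference could look like this:

// Hypothetical scalar masked SAD, assuming the mask is a 0..64 per-pixel
// weight; libvpx's vpx_masked_sad*_c may accumulate or round differently.
#include <cstdint>
#include <cstdlib>

static unsigned int masked_sad_sketch(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      const uint8_t *msk, int msk_stride,
                                      int width, int height) {
  unsigned int sad = 0;
  for (int r = 0; r < height; ++r) {
    for (int c = 0; c < width; ++c) {
      const int diff = std::abs(src[c] - ref[c]);
      sad += msk[c] * diff;  // weight each absolute difference by the mask
    }
    src += src_stride;
    ref += ref_stride;
    msk += msk_stride;
  }
  // Scale back from the 6-bit mask range (assumption, not verified).
  return (sad + 32) >> 6;
}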
diff --git a/test/masked_variance_test.cc b/test/masked_variance_test.cc
index 1710285..131599f 100644
--- a/test/masked_variance_test.cc
+++ b/test/masked_variance_test.cc
@@ -35,11 +35,11 @@
const uint8_t *m, int m_stride,
unsigned int *sse);
-typedef std::tr1::tuple<MaskedVarianceFunc,
- MaskedVarianceFunc> MaskedVarianceParam;
+typedef std::tr1::tuple<MaskedVarianceFunc, MaskedVarianceFunc>
+ MaskedVarianceParam;
-class MaskedVarianceTest :
- public ::testing::TestWithParam<MaskedVarianceParam> {
+class MaskedVarianceTest
+ : public ::testing::TestWithParam<MaskedVarianceParam> {
public:
virtual ~MaskedVarianceTest() {}
virtual void SetUp() {
@@ -58,9 +58,9 @@
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
@@ -68,41 +68,36 @@
int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
- for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
+ for (int j = 0; j < MAX_SB_SIZE * MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
msk_ptr[j] = rnd(65);
}
- ref_ret = ref_func_(src_ptr, src_stride,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src_ptr, src_stride, ref_ptr, ref_stride, msk_ptr,
+ msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride, ref_ptr,
+ ref_stride, msk_ptr,
+ msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test OperationCheck,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test OperationCheck,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
TEST_P(MaskedVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
@@ -110,44 +105,36 @@
int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < 8; ++i) {
- memset(src_ptr, (i & 0x1) ? 255 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
- memset(ref_ptr, (i & 0x2) ? 255 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
- memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
+ memset(src_ptr, (i & 0x1) ? 255 : 0, MAX_SB_SIZE * MAX_SB_SIZE);
+ memset(ref_ptr, (i & 0x2) ? 255 : 0, MAX_SB_SIZE * MAX_SB_SIZE);
+ memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE * MAX_SB_SIZE);
- ref_ret = ref_func_(src_ptr, src_stride,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src_ptr, src_stride, ref_ptr, ref_stride, msk_ptr,
+ msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride, ref_ptr,
+ ref_stride, msk_ptr,
+ msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test ExtremeValues,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
typedef unsigned int (*MaskedSubPixelVarianceFunc)(
- const uint8_t *a, int a_stride,
- int xoffset, int yoffset,
- const uint8_t *b, int b_stride,
- const uint8_t *m, int m_stride,
- unsigned int *sse);
+ const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b,
+ int b_stride, const uint8_t *m, int m_stride, unsigned int *sse);
-typedef std::tr1::tuple<MaskedSubPixelVarianceFunc,
- MaskedSubPixelVarianceFunc> MaskedSubPixelVarianceParam;
+typedef std::tr1::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc>
+ MaskedSubPixelVarianceParam;
-class MaskedSubPixelVarianceTest :
- public ::testing::TestWithParam<MaskedSubPixelVarianceParam> {
+class MaskedSubPixelVarianceTest
+ : public ::testing::TestWithParam<MaskedSubPixelVarianceParam> {
public:
virtual ~MaskedSubPixelVarianceTest() {}
virtual void SetUp() {
@@ -166,21 +153,21 @@
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+ DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
int err_count = 0;
int first_failure = -1;
- int src_stride = (MAX_SB_SIZE+1);
- int ref_stride = (MAX_SB_SIZE+1);
- int msk_stride = (MAX_SB_SIZE+1);
+ int src_stride = (MAX_SB_SIZE + 1);
+ int ref_stride = (MAX_SB_SIZE + 1);
+ int msk_stride = (MAX_SB_SIZE + 1);
int xoffset;
int yoffset;
for (int i = 0; i < number_of_iterations; ++i) {
- int xoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
- int yoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
- for (int j = 0; j < (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1); j++) {
+ int xoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
+ int yoffsets[] = { 0, 4, rnd(BIL_SUBPEL_SHIFTS) };
+ for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1); j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
msk_ptr[j] = rnd(65);
@@ -191,64 +178,56 @@
xoffset = xoffsets[k];
yoffset = yoffsets[l];
- ref_ret = ref_func_(src_ptr, src_stride,
- xoffset, yoffset,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
- xoffset, yoffset,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
+ ref_stride, msk_ptr, msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(
+ opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
+ ref_stride, msk_ptr, msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
- err_count++;
- if (first_failure == -1)
- first_failure = i;
+ err_count++;
+ if (first_failure == -1) first_failure = i;
}
}
}
}
EXPECT_EQ(0, err_count)
- << "Error: Masked Sub Pixel Variance Test OperationCheck,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ << "Error: Masked Sub Pixel Variance Test OperationCheck,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
TEST_P(MaskedSubPixelVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+ DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
int first_failure_x = -1;
int first_failure_y = -1;
int err_count = 0;
int first_failure = -1;
- int src_stride = (MAX_SB_SIZE+1);
- int ref_stride = (MAX_SB_SIZE+1);
- int msk_stride = (MAX_SB_SIZE+1);
+ int src_stride = (MAX_SB_SIZE + 1);
+ int ref_stride = (MAX_SB_SIZE + 1);
+ int msk_stride = (MAX_SB_SIZE + 1);
- for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
- for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
+ for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
+ for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
for (int i = 0; i < 8; ++i) {
- memset(src_ptr, (i & 0x1) ? 255 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
- memset(ref_ptr, (i & 0x2) ? 255 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
- memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+ memset(src_ptr, (i & 0x1) ? 255 : 0,
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
+ memset(ref_ptr, (i & 0x2) ? 255 : 0,
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
+ memset(msk_ptr, (i & 0x4) ? 64 : 0,
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
- ref_ret = ref_func_(src_ptr, src_stride,
- xoffset, yoffset,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
- xoffset, yoffset,
- ref_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
+ ref_stride, msk_ptr, msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(
+ opt_ret = opt_func_(src_ptr, src_stride, xoffset, yoffset, ref_ptr,
+ ref_stride, msk_ptr, msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
@@ -262,21 +241,19 @@
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test ExtremeValues,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure
- << " x_offset = " << first_failure_x
- << " y_offset = " << first_failure_y;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure
+ << " x_offset = " << first_failure_x
+ << " y_offset = " << first_failure_y;
}
#if CONFIG_VP9_HIGHBITDEPTH
-typedef std::tr1::tuple<MaskedVarianceFunc,
- MaskedVarianceFunc,
- vpx_bit_depth_t> HighbdMaskedVarianceParam;
+typedef std::tr1::tuple<MaskedVarianceFunc, MaskedVarianceFunc, vpx_bit_depth_t>
+ HighbdMaskedVarianceParam;
-class HighbdMaskedVarianceTest :
- public ::testing::TestWithParam<HighbdMaskedVarianceParam> {
+class HighbdMaskedVarianceTest
+ : public ::testing::TestWithParam<HighbdMaskedVarianceParam> {
public:
virtual ~HighbdMaskedVarianceTest() {}
virtual void SetUp() {
@@ -297,11 +274,11 @@
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
- uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+ DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+ uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
@@ -309,43 +286,38 @@
int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
- for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
+ for (int j = 0; j < MAX_SB_SIZE * MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
msk_ptr[j] = rnd(65);
}
- ref_ret = ref_func_(src8_ptr, src_stride,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src8_ptr, src_stride, ref8_ptr, ref_stride, msk_ptr,
+ msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride, ref8_ptr,
+ ref_stride, msk_ptr,
+ msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test OperationCheck,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test OperationCheck,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
TEST_P(HighbdMaskedVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
- uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
- uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+ DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE * MAX_SB_SIZE]);
+ uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+ uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
int src_stride = MAX_SB_SIZE;
@@ -354,39 +326,33 @@
for (int i = 0; i < 8; ++i) {
vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
- MAX_SB_SIZE*MAX_SB_SIZE);
+ MAX_SB_SIZE * MAX_SB_SIZE);
vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
- MAX_SB_SIZE*MAX_SB_SIZE);
- memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
+ MAX_SB_SIZE * MAX_SB_SIZE);
+ memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE * MAX_SB_SIZE);
- ref_ret = ref_func_(src8_ptr, src_stride,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src8_ptr, src_stride, ref8_ptr, ref_stride, msk_ptr,
+ msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride, ref8_ptr,
+ ref_stride, msk_ptr,
+ msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
- if (first_failure == -1)
- first_failure = i;
+ if (first_failure == -1) first_failure = i;
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test ExtremeValues,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure;
}
-typedef std::tr1::tuple<MaskedSubPixelVarianceFunc,
- MaskedSubPixelVarianceFunc,
+typedef std::tr1::tuple<MaskedSubPixelVarianceFunc, MaskedSubPixelVarianceFunc,
vpx_bit_depth_t> HighbdMaskedSubPixelVarianceParam;
-class HighbdMaskedSubPixelVarianceTest :
- public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
+class HighbdMaskedSubPixelVarianceTest
+ : public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
public:
virtual ~HighbdMaskedSubPixelVarianceTest() {}
virtual void SetUp() {
@@ -407,39 +373,35 @@
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
- uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+ DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+ uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
int first_failure_x = -1;
int first_failure_y = -1;
- int src_stride = (MAX_SB_SIZE+1);
- int ref_stride = (MAX_SB_SIZE+1);
- int msk_stride = (MAX_SB_SIZE+1);
+ int src_stride = (MAX_SB_SIZE + 1);
+ int ref_stride = (MAX_SB_SIZE + 1);
+ int msk_stride = (MAX_SB_SIZE + 1);
int xoffset, yoffset;
for (int i = 0; i < number_of_iterations; ++i) {
for (xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
for (yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
- for (int j = 0; j < (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1); j++) {
+ for (int j = 0; j < (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1); j++) {
src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
msk_ptr[j] = rnd(65);
}
- ref_ret = ref_func_(src8_ptr, src_stride,
- xoffset, yoffset,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
- xoffset, yoffset,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
+ ref_stride, msk_ptr, msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret =
+ opt_func_(src8_ptr, src_stride, xoffset,
+ yoffset, ref8_ptr, ref_stride,
+ msk_ptr, msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
@@ -454,49 +416,45 @@
}
EXPECT_EQ(0, err_count)
- << "Error: Masked Sub Pixel Variance Test OperationCheck,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure
- << " x_offset = " << first_failure_x
- << " y_offset = " << first_failure_y;
+ << "Error: Masked Sub Pixel Variance Test OperationCheck,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure
+ << " x_offset = " << first_failure_x << " y_offset = " << first_failure_y;
}
TEST_P(HighbdMaskedSubPixelVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
- uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
- uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+ DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1)]);
+ uint8_t *src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+ uint8_t *ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int first_failure_x = -1;
int first_failure_y = -1;
int err_count = 0;
int first_failure = -1;
- int src_stride = (MAX_SB_SIZE+1);
- int ref_stride = (MAX_SB_SIZE+1);
- int msk_stride = (MAX_SB_SIZE+1);
+ int src_stride = (MAX_SB_SIZE + 1);
+ int ref_stride = (MAX_SB_SIZE + 1);
+ int msk_stride = (MAX_SB_SIZE + 1);
- for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
- for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
+ for (int xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
+ for (int yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
for (int i = 0; i < 8; ++i) {
vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
- (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
- (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
- memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
+ memset(msk_ptr, (i & 0x4) ? 64 : 0,
+ (MAX_SB_SIZE + 1) * (MAX_SB_SIZE + 1));
- ref_ret = ref_func_(src8_ptr, src_stride,
- xoffset, yoffset,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &ref_sse);
- ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
- xoffset, yoffset,
- ref8_ptr, ref_stride,
- msk_ptr, msk_stride,
- &opt_sse));
+ ref_ret = ref_func_(src8_ptr, src_stride, xoffset, yoffset, ref8_ptr,
+ ref_stride, msk_ptr, msk_stride, &ref_sse);
+ ASM_REGISTER_STATE_CHECK(opt_ret =
+ opt_func_(src8_ptr, src_stride, xoffset,
+ yoffset, ref8_ptr, ref_stride,
+ msk_ptr, msk_stride, &opt_sse));
if (opt_ret != ref_ret || opt_sse != ref_sse) {
err_count++;
@@ -510,12 +468,11 @@
}
}
- EXPECT_EQ(0, err_count)
- << "Error: Masked Variance Test ExtremeValues,"
- << "C output doesn't match SSSE3 output. "
- << "First failed at test case " << first_failure
- << " x_offset = " << first_failure_x
- << " y_offset = " << first_failure_y;
+ EXPECT_EQ(0, err_count) << "Error: Masked Variance Test ExtremeValues,"
+ << "C output doesn't match SSSE3 output. "
+ << "First failed at test case " << first_failure
+ << " x_offset = " << first_failure_x
+ << " y_offset = " << first_failure_y;
}
#endif // CONFIG_VP9_HIGHBITDEPTH
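The high-bit-depth variants in this file keep the same harness but store samples in uint16_t buffers masked to (1 << bit_depth_) - 1 and hand them to the kernels as uint8_t * via CONVERT_TO_BYTEPTR. A sketch of that buffer setup follows; byteptr() and kMaxSbSize are stand-ins for illustration only — the real CONVERT_TO_BYTEPTR in vpx_dsp encodes the pointer rather than simply casting it, and MAX_SB_SIZE depends on CONFIG_EXT_PARTITION.

// Sketch of the high-bit-depth buffer setup used by the tests above.
// byteptr() is a placeholder, not libvpx's CONVERT_TO_BYTEPTR macro.
#include <cstdint>
#include <random>

static const int kMaxSbSize = 128;  // assumption: MAX_SB_SIZE with ext-partition

static uint8_t *byteptr(uint16_t *p) {  // stand-in for CONVERT_TO_BYTEPTR
  return reinterpret_cast<uint8_t *>(p);
}

int main() {
  std::mt19937 rnd(0xbaba);
  const int bit_depth = 10;                       // e.g. VPX_BITS_10
  const uint16_t max_val = (1 << bit_depth) - 1;  // clamp samples to bit depth
  alignas(16) static uint16_t src[kMaxSbSize * kMaxSbSize];
  alignas(16) static uint16_t ref[kMaxSbSize * kMaxSbSize];
  alignas(16) static uint8_t msk[kMaxSbSize * kMaxSbSize];
  for (int j = 0; j < kMaxSbSize * kMaxSbSize; ++j) {
    src[j] = rnd() & max_val;  // mirrors rnd.Rand16() & ((1 << bit_depth_) - 1)
    ref[j] = rnd() & max_val;
    msk[j] = rnd() % 65;       // mask weights in [0, 64], as in the tests
  }
  uint8_t *src8 = byteptr(src);  // what the tests pass to the highbd kernels
  uint8_t *ref8 = byteptr(ref);
  (void)src8;
  (void)ref8;
  return 0;
}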
@@ -523,293 +480,308 @@
#if HAVE_SSSE3
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, MaskedVarianceTest,
- ::testing::Values(
+ SSSE3_C_COMPARE, MaskedVarianceTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_variance128x128_ssse3,
- &vpx_masked_variance128x128_c),
- make_tuple(&vpx_masked_variance128x64_ssse3,
- &vpx_masked_variance128x64_c),
- make_tuple(&vpx_masked_variance64x128_ssse3,
- &vpx_masked_variance64x128_c),
+ make_tuple(&vpx_masked_variance128x128_ssse3,
+ &vpx_masked_variance128x128_c),
+ make_tuple(&vpx_masked_variance128x64_ssse3,
+ &vpx_masked_variance128x64_c),
+ make_tuple(&vpx_masked_variance64x128_ssse3,
+ &vpx_masked_variance64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_variance64x64_ssse3,
- &vpx_masked_variance64x64_c),
- make_tuple(&vpx_masked_variance64x32_ssse3,
- &vpx_masked_variance64x32_c),
- make_tuple(&vpx_masked_variance32x64_ssse3,
- &vpx_masked_variance32x64_c),
- make_tuple(&vpx_masked_variance32x32_ssse3,
- &vpx_masked_variance32x32_c),
- make_tuple(&vpx_masked_variance32x16_ssse3,
- &vpx_masked_variance32x16_c),
- make_tuple(&vpx_masked_variance16x32_ssse3,
- &vpx_masked_variance16x32_c),
- make_tuple(&vpx_masked_variance16x16_ssse3,
- &vpx_masked_variance16x16_c),
- make_tuple(&vpx_masked_variance16x8_ssse3,
- &vpx_masked_variance16x8_c),
- make_tuple(&vpx_masked_variance8x16_ssse3,
- &vpx_masked_variance8x16_c),
- make_tuple(&vpx_masked_variance8x8_ssse3,
- &vpx_masked_variance8x8_c),
- make_tuple(&vpx_masked_variance8x4_ssse3,
- &vpx_masked_variance8x4_c),
- make_tuple(&vpx_masked_variance4x8_ssse3,
- &vpx_masked_variance4x8_c),
- make_tuple(&vpx_masked_variance4x4_ssse3,
- &vpx_masked_variance4x4_c)));
+ make_tuple(&vpx_masked_variance64x64_ssse3,
+ &vpx_masked_variance64x64_c),
+ make_tuple(&vpx_masked_variance64x32_ssse3,
+ &vpx_masked_variance64x32_c),
+ make_tuple(&vpx_masked_variance32x64_ssse3,
+ &vpx_masked_variance32x64_c),
+ make_tuple(&vpx_masked_variance32x32_ssse3,
+ &vpx_masked_variance32x32_c),
+ make_tuple(&vpx_masked_variance32x16_ssse3,
+ &vpx_masked_variance32x16_c),
+ make_tuple(&vpx_masked_variance16x32_ssse3,
+ &vpx_masked_variance16x32_c),
+ make_tuple(&vpx_masked_variance16x16_ssse3,
+ &vpx_masked_variance16x16_c),
+ make_tuple(&vpx_masked_variance16x8_ssse3, &vpx_masked_variance16x8_c),
+ make_tuple(&vpx_masked_variance8x16_ssse3, &vpx_masked_variance8x16_c),
+ make_tuple(&vpx_masked_variance8x8_ssse3, &vpx_masked_variance8x8_c),
+ make_tuple(&vpx_masked_variance8x4_ssse3, &vpx_masked_variance8x4_c),
+ make_tuple(&vpx_masked_variance4x8_ssse3, &vpx_masked_variance4x8_c),
+ make_tuple(&vpx_masked_variance4x4_ssse3, &vpx_masked_variance4x4_c)));
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
- ::testing::Values(
+ SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_sub_pixel_variance128x128_ssse3,
- &vpx_masked_sub_pixel_variance128x128_c),
- make_tuple(&vpx_masked_sub_pixel_variance128x64_ssse3,
- &vpx_masked_sub_pixel_variance128x64_c),
- make_tuple(&vpx_masked_sub_pixel_variance64x128_ssse3,
- &vpx_masked_sub_pixel_variance64x128_c),
+ make_tuple(&vpx_masked_sub_pixel_variance128x128_ssse3,
+ &vpx_masked_sub_pixel_variance128x128_c),
+ make_tuple(&vpx_masked_sub_pixel_variance128x64_ssse3,
+ &vpx_masked_sub_pixel_variance128x64_c),
+ make_tuple(&vpx_masked_sub_pixel_variance64x128_ssse3,
+ &vpx_masked_sub_pixel_variance64x128_c),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_masked_sub_pixel_variance64x64_ssse3,
- &vpx_masked_sub_pixel_variance64x64_c),
- make_tuple(&vpx_masked_sub_pixel_variance64x32_ssse3,
- &vpx_masked_sub_pixel_variance64x32_c),
- make_tuple(&vpx_masked_sub_pixel_variance32x64_ssse3,
- &vpx_masked_sub_pixel_variance32x64_c),
- make_tuple(&vpx_masked_sub_pixel_variance32x32_ssse3,
- &vpx_masked_sub_pixel_variance32x32_c),
- make_tuple(&vpx_masked_sub_pixel_variance32x16_ssse3,
- &vpx_masked_sub_pixel_variance32x16_c),
- make_tuple(&vpx_masked_sub_pixel_variance16x32_ssse3,
- &vpx_masked_sub_pixel_variance16x32_c),
- make_tuple(&vpx_masked_sub_pixel_variance16x16_ssse3,
- &vpx_masked_sub_pixel_variance16x16_c),
- make_tuple(&vpx_masked_sub_pixel_variance16x8_ssse3,
- &vpx_masked_sub_pixel_variance16x8_c),
- make_tuple(&vpx_masked_sub_pixel_variance8x16_ssse3,
- &vpx_masked_sub_pixel_variance8x16_c),
- make_tuple(&vpx_masked_sub_pixel_variance8x8_ssse3,
- &vpx_masked_sub_pixel_variance8x8_c),
- make_tuple(&vpx_masked_sub_pixel_variance8x4_ssse3,
- &vpx_masked_sub_pixel_variance8x4_c),
- make_tuple(&vpx_masked_sub_pixel_variance4x8_ssse3,
- &vpx_masked_sub_pixel_variance4x8_c),
- make_tuple(&vpx_masked_sub_pixel_variance4x4_ssse3,
- &vpx_masked_sub_pixel_variance4x4_c)));
+ make_tuple(&vpx_masked_sub_pixel_variance64x64_ssse3,
+ &vpx_masked_sub_pixel_variance64x64_c),
+ make_tuple(&vpx_masked_sub_pixel_variance64x32_ssse3,
+ &vpx_masked_sub_pixel_variance64x32_c),
+ make_tuple(&vpx_masked_sub_pixel_variance32x64_ssse3,
+ &vpx_masked_sub_pixel_variance32x64_c),
+ make_tuple(&vpx_masked_sub_pixel_variance32x32_ssse3,
+ &vpx_masked_sub_pixel_variance32x32_c),
+ make_tuple(&vpx_masked_sub_pixel_variance32x16_ssse3,
+ &vpx_masked_sub_pixel_variance32x16_c),
+ make_tuple(&vpx_masked_sub_pixel_variance16x32_ssse3,
+ &vpx_masked_sub_pixel_variance16x32_c),
+ make_tuple(&vpx_masked_sub_pixel_variance16x16_ssse3,
+ &vpx_masked_sub_pixel_variance16x16_c),
+ make_tuple(&vpx_masked_sub_pixel_variance16x8_ssse3,
+ &vpx_masked_sub_pixel_variance16x8_c),
+ make_tuple(&vpx_masked_sub_pixel_variance8x16_ssse3,
+ &vpx_masked_sub_pixel_variance8x16_c),
+ make_tuple(&vpx_masked_sub_pixel_variance8x8_ssse3,
+ &vpx_masked_sub_pixel_variance8x8_c),
+ make_tuple(&vpx_masked_sub_pixel_variance8x4_ssse3,
+ &vpx_masked_sub_pixel_variance8x4_c),
+ make_tuple(&vpx_masked_sub_pixel_variance4x8_ssse3,
+ &vpx_masked_sub_pixel_variance4x8_c),
+ make_tuple(&vpx_masked_sub_pixel_variance4x4_ssse3,
+ &vpx_masked_sub_pixel_variance4x4_c)));
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, HighbdMaskedVarianceTest,
- ::testing::Values(
+ SSSE3_C_COMPARE, HighbdMaskedVarianceTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_variance128x128_ssse3,
- &vpx_highbd_masked_variance128x128_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance128x64_ssse3,
- &vpx_highbd_masked_variance128x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance64x128_ssse3,
- &vpx_highbd_masked_variance64x128_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance128x128_ssse3,
+ &vpx_highbd_masked_variance128x128_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance128x64_ssse3,
+ &vpx_highbd_masked_variance128x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance64x128_ssse3,
+ &vpx_highbd_masked_variance64x128_c, VPX_BITS_8),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_variance64x64_ssse3,
- &vpx_highbd_masked_variance64x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance64x32_ssse3,
- &vpx_highbd_masked_variance64x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance32x64_ssse3,
- &vpx_highbd_masked_variance32x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance32x32_ssse3,
- &vpx_highbd_masked_variance32x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance32x16_ssse3,
- &vpx_highbd_masked_variance32x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance16x32_ssse3,
- &vpx_highbd_masked_variance16x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance16x16_ssse3,
- &vpx_highbd_masked_variance16x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance16x8_ssse3,
- &vpx_highbd_masked_variance16x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance8x16_ssse3,
- &vpx_highbd_masked_variance8x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance8x8_ssse3,
- &vpx_highbd_masked_variance8x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance8x4_ssse3,
- &vpx_highbd_masked_variance8x4_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance4x8_ssse3,
- &vpx_highbd_masked_variance4x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_variance4x4_ssse3,
- &vpx_highbd_masked_variance4x4_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance64x64_ssse3,
+ &vpx_highbd_masked_variance64x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance64x32_ssse3,
+ &vpx_highbd_masked_variance64x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance32x64_ssse3,
+ &vpx_highbd_masked_variance32x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance32x32_ssse3,
+ &vpx_highbd_masked_variance32x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance32x16_ssse3,
+ &vpx_highbd_masked_variance32x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance16x32_ssse3,
+ &vpx_highbd_masked_variance16x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance16x16_ssse3,
+ &vpx_highbd_masked_variance16x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance16x8_ssse3,
+ &vpx_highbd_masked_variance16x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance8x16_ssse3,
+ &vpx_highbd_masked_variance8x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance8x8_ssse3,
+ &vpx_highbd_masked_variance8x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance8x4_ssse3,
+ &vpx_highbd_masked_variance8x4_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance4x8_ssse3,
+ &vpx_highbd_masked_variance4x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_variance4x4_ssse3,
+ &vpx_highbd_masked_variance4x4_c, VPX_BITS_8),
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_10_masked_variance128x128_ssse3,
- &vpx_highbd_10_masked_variance128x128_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance128x64_ssse3,
- &vpx_highbd_10_masked_variance128x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance64x128_ssse3,
- &vpx_highbd_10_masked_variance64x128_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance128x128_ssse3,
+ &vpx_highbd_10_masked_variance128x128_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance128x64_ssse3,
+ &vpx_highbd_10_masked_variance128x64_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance64x128_ssse3,
+ &vpx_highbd_10_masked_variance64x128_c, VPX_BITS_10),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_10_masked_variance64x64_ssse3,
- &vpx_highbd_10_masked_variance64x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance64x32_ssse3,
- &vpx_highbd_10_masked_variance64x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance32x64_ssse3,
- &vpx_highbd_10_masked_variance32x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance32x32_ssse3,
- &vpx_highbd_10_masked_variance32x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance32x16_ssse3,
- &vpx_highbd_10_masked_variance32x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance16x32_ssse3,
- &vpx_highbd_10_masked_variance16x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance16x16_ssse3,
- &vpx_highbd_10_masked_variance16x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance16x8_ssse3,
- &vpx_highbd_10_masked_variance16x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance8x16_ssse3,
- &vpx_highbd_10_masked_variance8x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance8x8_ssse3,
- &vpx_highbd_10_masked_variance8x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance8x4_ssse3,
- &vpx_highbd_10_masked_variance8x4_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance4x8_ssse3,
- &vpx_highbd_10_masked_variance4x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_variance4x4_ssse3,
- &vpx_highbd_10_masked_variance4x4_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance64x64_ssse3,
+ &vpx_highbd_10_masked_variance64x64_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance64x32_ssse3,
+ &vpx_highbd_10_masked_variance64x32_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance32x64_ssse3,
+ &vpx_highbd_10_masked_variance32x64_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance32x32_ssse3,
+ &vpx_highbd_10_masked_variance32x32_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance32x16_ssse3,
+ &vpx_highbd_10_masked_variance32x16_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance16x32_ssse3,
+ &vpx_highbd_10_masked_variance16x32_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance16x16_ssse3,
+ &vpx_highbd_10_masked_variance16x16_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance16x8_ssse3,
+ &vpx_highbd_10_masked_variance16x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance8x16_ssse3,
+ &vpx_highbd_10_masked_variance8x16_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance8x8_ssse3,
+ &vpx_highbd_10_masked_variance8x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance8x4_ssse3,
+ &vpx_highbd_10_masked_variance8x4_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance4x8_ssse3,
+ &vpx_highbd_10_masked_variance4x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_variance4x4_ssse3,
+ &vpx_highbd_10_masked_variance4x4_c, VPX_BITS_10),
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_12_masked_variance128x128_ssse3,
- &vpx_highbd_12_masked_variance128x128_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance128x64_ssse3,
- &vpx_highbd_12_masked_variance128x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance64x128_ssse3,
- &vpx_highbd_12_masked_variance64x128_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance128x128_ssse3,
+ &vpx_highbd_12_masked_variance128x128_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance128x64_ssse3,
+ &vpx_highbd_12_masked_variance128x64_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance64x128_ssse3,
+ &vpx_highbd_12_masked_variance64x128_c, VPX_BITS_12),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_12_masked_variance64x64_ssse3,
- &vpx_highbd_12_masked_variance64x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance64x32_ssse3,
- &vpx_highbd_12_masked_variance64x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance32x64_ssse3,
- &vpx_highbd_12_masked_variance32x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance32x32_ssse3,
- &vpx_highbd_12_masked_variance32x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance32x16_ssse3,
- &vpx_highbd_12_masked_variance32x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance16x32_ssse3,
- &vpx_highbd_12_masked_variance16x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance16x16_ssse3,
- &vpx_highbd_12_masked_variance16x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance16x8_ssse3,
- &vpx_highbd_12_masked_variance16x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance8x16_ssse3,
- &vpx_highbd_12_masked_variance8x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance8x8_ssse3,
- &vpx_highbd_12_masked_variance8x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance8x4_ssse3,
- &vpx_highbd_12_masked_variance8x4_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance4x8_ssse3,
- &vpx_highbd_12_masked_variance4x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_variance4x4_ssse3,
- &vpx_highbd_12_masked_variance4x4_c, VPX_BITS_12)));
+ make_tuple(&vpx_highbd_12_masked_variance64x64_ssse3,
+ &vpx_highbd_12_masked_variance64x64_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance64x32_ssse3,
+ &vpx_highbd_12_masked_variance64x32_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance32x64_ssse3,
+ &vpx_highbd_12_masked_variance32x64_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance32x32_ssse3,
+ &vpx_highbd_12_masked_variance32x32_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance32x16_ssse3,
+ &vpx_highbd_12_masked_variance32x16_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance16x32_ssse3,
+ &vpx_highbd_12_masked_variance16x32_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance16x16_ssse3,
+ &vpx_highbd_12_masked_variance16x16_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance16x8_ssse3,
+ &vpx_highbd_12_masked_variance16x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance8x16_ssse3,
+ &vpx_highbd_12_masked_variance8x16_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance8x8_ssse3,
+ &vpx_highbd_12_masked_variance8x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance8x4_ssse3,
+ &vpx_highbd_12_masked_variance8x4_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance4x8_ssse3,
+ &vpx_highbd_12_masked_variance4x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_variance4x4_ssse3,
+ &vpx_highbd_12_masked_variance4x4_c, VPX_BITS_12)));
INSTANTIATE_TEST_CASE_P(
- SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
- ::testing::Values(
+ SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
+ ::testing::Values(
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_sub_pixel_variance128x128_ssse3,
- &vpx_highbd_masked_sub_pixel_variance128x128_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance128x64_ssse3,
- &vpx_highbd_masked_sub_pixel_variance128x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance64x128_ssse3,
- &vpx_highbd_masked_sub_pixel_variance64x128_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance128x128_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance128x128_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance128x64_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance128x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance64x128_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance64x128_c, VPX_BITS_8),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_masked_sub_pixel_variance64x64_ssse3,
- &vpx_highbd_masked_sub_pixel_variance64x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance64x32_ssse3,
- &vpx_highbd_masked_sub_pixel_variance64x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance32x64_ssse3,
- &vpx_highbd_masked_sub_pixel_variance32x64_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance32x32_ssse3,
- &vpx_highbd_masked_sub_pixel_variance32x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance32x16_ssse3,
- &vpx_highbd_masked_sub_pixel_variance32x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance16x32_ssse3,
- &vpx_highbd_masked_sub_pixel_variance16x32_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance16x16_ssse3,
- &vpx_highbd_masked_sub_pixel_variance16x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance16x8_ssse3,
- &vpx_highbd_masked_sub_pixel_variance16x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance8x16_ssse3,
- &vpx_highbd_masked_sub_pixel_variance8x16_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance8x8_ssse3,
- &vpx_highbd_masked_sub_pixel_variance8x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance8x4_ssse3,
- &vpx_highbd_masked_sub_pixel_variance8x4_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance4x8_ssse3,
- &vpx_highbd_masked_sub_pixel_variance4x8_c, VPX_BITS_8),
- make_tuple(&vpx_highbd_masked_sub_pixel_variance4x4_ssse3,
- &vpx_highbd_masked_sub_pixel_variance4x4_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance64x64_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance64x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance64x32_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance64x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance32x64_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance32x64_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance32x32_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance32x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance32x16_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance32x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance16x32_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance16x32_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance16x16_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance16x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance16x8_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance16x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance8x16_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance8x16_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance8x8_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance8x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance8x4_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance8x4_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance4x8_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance4x8_c, VPX_BITS_8),
+ make_tuple(&vpx_highbd_masked_sub_pixel_variance4x4_ssse3,
+ &vpx_highbd_masked_sub_pixel_variance4x4_c, VPX_BITS_8),
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x128_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance128x128_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x64_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance128x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x128_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance64x128_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x128_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance128x128_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance128x64_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance128x64_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x128_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance64x128_c,
+ VPX_BITS_10),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x64_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance64x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x32_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance64x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x64_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance32x64_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x32_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance32x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x16_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance32x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x32_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance16x32_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x16_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance16x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x8_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance16x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x16_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance8x16_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x8_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance8x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x4_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance8x4_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x8_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance4x8_c, VPX_BITS_10),
- make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x4_ssse3,
- &vpx_highbd_10_masked_sub_pixel_variance4x4_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x64_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance64x64_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance64x32_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance64x32_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x64_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance32x64_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x32_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance32x32_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance32x16_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance32x16_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x32_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance16x32_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x16_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance16x16_c,
+ VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance16x8_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance16x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x16_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance8x16_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x8_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance8x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance8x4_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance8x4_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x8_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance4x8_c, VPX_BITS_10),
+ make_tuple(&vpx_highbd_10_masked_sub_pixel_variance4x4_ssse3,
+ &vpx_highbd_10_masked_sub_pixel_variance4x4_c, VPX_BITS_10),
#if CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x128_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance128x128_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x64_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance128x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x128_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance64x128_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x128_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance128x128_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance128x64_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance128x64_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x128_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance64x128_c,
+ VPX_BITS_12),
#endif // CONFIG_EXT_PARTITION
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x64_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance64x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x32_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance64x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x64_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance32x64_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x32_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance32x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x16_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance32x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x32_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance16x32_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x16_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance16x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x8_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance16x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x16_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance8x16_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x8_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance8x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x4_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance8x4_c, VPX_BITS_12) ,
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x8_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance4x8_c, VPX_BITS_12),
- make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x4_ssse3,
- &vpx_highbd_12_masked_sub_pixel_variance4x4_c, VPX_BITS_12)));
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x64_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance64x64_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance64x32_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance64x32_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x64_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance32x64_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x32_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance32x32_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance32x16_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance32x16_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x32_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance16x32_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x16_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance16x16_c,
+ VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance16x8_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance16x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x16_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance8x16_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x8_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance8x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance8x4_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance8x4_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x8_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance4x8_c, VPX_BITS_12),
+ make_tuple(&vpx_highbd_12_masked_sub_pixel_variance4x4_ssse3,
+ &vpx_highbd_12_masked_sub_pixel_variance4x4_c,
+ VPX_BITS_12)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSSE3
diff --git a/test/md5_helper.h b/test/md5_helper.h
index 742cf0b7..ef310a2 100644
--- a/test/md5_helper.h
+++ b/test/md5_helper.h
@@ -17,9 +17,7 @@
namespace libvpx_test {
class MD5 {
public:
- MD5() {
- MD5Init(&md5_);
- }
+ MD5() { MD5Init(&md5_); }
void Add(const vpx_image_t *img) {
for (int plane = 0; plane < 3; ++plane) {
@@ -30,10 +28,13 @@
// This works only for chroma_shift of 0 and 1.
const int bytes_per_sample =
(img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
- const int h = plane ? (img->d_h + img->y_chroma_shift) >>
- img->y_chroma_shift : img->d_h;
- const int w = (plane ? (img->d_w + img->x_chroma_shift) >>
- img->x_chroma_shift : img->d_w) * bytes_per_sample;
+ const int h =
+ plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
+ : img->d_h;
+ const int w =
+ (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift
+ : img->d_w) *
+ bytes_per_sample;
for (int y = 0; y < h; ++y) {
MD5Update(&md5_, buf, w);
@@ -56,8 +57,8 @@
MD5Final(tmp, &ctx_tmp);
for (int i = 0; i < 16; i++) {
- res_[i * 2 + 0] = hex[tmp[i] >> 4];
- res_[i * 2 + 1] = hex[tmp[i] & 0xf];
+ res_[i * 2 + 0] = hex[tmp[i] >> 4];
+ res_[i * 2 + 1] = hex[tmp[i] & 0xf];
}
res_[32] = 0;
diff --git a/test/minmax_test.cc b/test/minmax_test.cc
index dbe4342..e51c9fd 100644
--- a/test/minmax_test.cc
+++ b/test/minmax_test.cc
@@ -23,9 +23,8 @@
using ::libvpx_test::ACMRandom;
-typedef void (*MinMaxFunc)(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int *min, int *max);
+typedef void (*MinMaxFunc)(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int *min, int *max);
class MinMaxTest : public ::testing::TestWithParam<MinMaxFunc> {
public:
@@ -39,9 +38,8 @@
ACMRandom rnd_;
};
-void reference_minmax(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int *min_ret, int *max_ret) {
+void reference_minmax(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int *min_ret, int *max_ret) {
int min = 255;
int max = 0;
for (int i = 0; i < 8; i++) {
@@ -110,9 +108,9 @@
reference_minmax(a, a_stride, b, b_stride, &min_ref, &max_ref);
ASM_REGISTER_STATE_CHECK(mm_func_(a, a_stride, b, b_stride, &min, &max));
EXPECT_EQ(max_ref, max) << "when a_stride = " << a_stride
- << " and b_stride = " << b_stride;;
+ << " and b_stride = " << b_stride;
EXPECT_EQ(min_ref, min) << "when a_stride = " << a_stride
- << " and b_stride = " << b_stride;;
+ << " and b_stride = " << b_stride;
}
}
}
diff --git a/test/obmc_sad_test.cc b/test/obmc_sad_test.cc
index beb7106..de80373 100644
--- a/test/obmc_sad_test.cc
+++ b/test/obmc_sad_test.cc
@@ -41,10 +41,10 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
const int pre_stride = rng_(MAX_SB_SIZE + 1);
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = rng_.Rand8();
wsrc[i] = rng_.Rand8() * rng_(kMaskMax * kMaskMax + 1);
mask[i] = rng_(kMaskMax * kMaskMax + 1);
@@ -53,7 +53,7 @@
const unsigned int ref_res = params_.ref_func(pre, pre_stride, wsrc, mask);
unsigned int tst_res;
ASM_REGISTER_STATE_CHECK(tst_res =
- params_.tst_func(pre, pre_stride, wsrc, mask));
+ params_.tst_func(pre, pre_stride, wsrc, mask));
ASSERT_EQ(ref_res, tst_res);
}
@@ -64,10 +64,10 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < MAX_SB_SIZE && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < MAX_SB_SIZE && !HasFatalFailure(); ++iter) {
const int pre_stride = iter;
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = UINT8_MAX;
wsrc[i] = UINT8_MAX * kMaskMax * kMaskMax;
mask[i] = kMaskMax * kMaskMax;
@@ -76,7 +76,7 @@
const unsigned int ref_res = params_.ref_func(pre, pre_stride, wsrc, mask);
unsigned int tst_res;
ASM_REGISTER_STATE_CHECK(tst_res =
- params_.tst_func(pre, pre_stride, wsrc, mask));
+ params_.tst_func(pre, pre_stride, wsrc, mask));
ASSERT_EQ(ref_res, tst_res);
}
@@ -120,20 +120,21 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
const int pre_stride = rng_(MAX_SB_SIZE + 1);
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
- pre[i] = rng_(1<<12);
- wsrc[i] = rng_(1<<12) * rng_(kMaskMax * kMaskMax + 1);
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
+ pre[i] = rng_(1 << 12);
+ wsrc[i] = rng_(1 << 12) * rng_(kMaskMax * kMaskMax + 1);
mask[i] = rng_(kMaskMax * kMaskMax + 1);
}
const unsigned int ref_res =
params_.ref_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask);
unsigned int tst_res;
- ASM_REGISTER_STATE_CHECK(tst_res =
- params_.tst_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask));
+ ASM_REGISTER_STATE_CHECK(
+ tst_res =
+ params_.tst_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask));
ASSERT_EQ(ref_res, tst_res);
}
@@ -144,10 +145,10 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < MAX_SB_SIZE && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < MAX_SB_SIZE && !HasFatalFailure(); ++iter) {
const int pre_stride = iter;
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = (1 << 12) - 1;
wsrc[i] = ((1 << 12) - 1) * kMaskMax * kMaskMax;
mask[i] = kMaskMax * kMaskMax;
@@ -156,8 +157,9 @@
const unsigned int ref_res =
params_.ref_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask);
unsigned int tst_res;
- ASM_REGISTER_STATE_CHECK(tst_res =
- params_.tst_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask));
+ ASM_REGISTER_STATE_CHECK(
+ tst_res =
+ params_.tst_func(CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask));
ASSERT_EQ(ref_res, tst_res);
}
diff --git a/test/obmc_variance_test.cc b/test/obmc_variance_test.cc
index b7de9ed..2bddcd2 100644
--- a/test/obmc_variance_test.cc
+++ b/test/obmc_variance_test.cc
@@ -44,18 +44,18 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
const int pre_stride = this->rng_(MAX_SB_SIZE + 1);
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = this->rng_.Rand8();
wsrc[i] = this->rng_.Rand8() * this->rng_(kMaskMax * kMaskMax + 1);
mask[i] = this->rng_(kMaskMax * kMaskMax + 1);
}
unsigned int ref_sse, tst_sse;
- const unsigned int ref_res = params_.ref_func(pre, pre_stride, wsrc, mask,
- &ref_sse);
+ const unsigned int ref_res =
+ params_.ref_func(pre, pre_stride, wsrc, mask, &ref_sse);
unsigned int tst_res;
ASM_REGISTER_STATE_CHECK(
tst_res = params_.tst_func(pre, pre_stride, wsrc, mask, &tst_sse));
@@ -70,18 +70,18 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < MAX_SB_SIZE && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < MAX_SB_SIZE && !HasFatalFailure(); ++iter) {
const int pre_stride = iter;
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = UINT8_MAX;
wsrc[i] = UINT8_MAX * kMaskMax * kMaskMax;
mask[i] = kMaskMax * kMaskMax;
}
unsigned int ref_sse, tst_sse;
- const unsigned int ref_res = params_.ref_func(pre, pre_stride, wsrc, mask,
- &ref_sse);
+ const unsigned int ref_res =
+ params_.ref_func(pre, pre_stride, wsrc, mask, &ref_sse);
unsigned int tst_res;
ASM_REGISTER_STATE_CHECK(
tst_res = params_.tst_func(pre, pre_stride, wsrc, mask, &tst_sse));
@@ -129,10 +129,10 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
const int pre_stride = this->rng_(MAX_SB_SIZE + 1);
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = this->rng_(1 << params_.bit_depth);
wsrc[i] = this->rng_(1 << params_.bit_depth) *
this->rng_(kMaskMax * kMaskMax + 1);
@@ -140,13 +140,12 @@
}
unsigned int ref_sse, tst_sse;
- const unsigned int ref_res = params_.ref_func(CONVERT_TO_BYTEPTR(pre),
- pre_stride,
- wsrc, mask, &ref_sse);
+ const unsigned int ref_res = params_.ref_func(
+ CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask, &ref_sse);
unsigned int tst_res;
- ASM_REGISTER_STATE_CHECK(
- tst_res = params_.tst_func(CONVERT_TO_BYTEPTR(pre),
- pre_stride, wsrc, mask, &tst_sse));
+ ASM_REGISTER_STATE_CHECK(tst_res = params_.tst_func(CONVERT_TO_BYTEPTR(pre),
+ pre_stride, wsrc, mask,
+ &tst_sse));
ASSERT_EQ(ref_res, tst_res);
ASSERT_EQ(ref_sse, tst_sse);
@@ -158,23 +157,22 @@
DECLARE_ALIGNED(32, int32_t, wsrc[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int32_t, mask[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < MAX_SB_SIZE && !HasFatalFailure() ; ++iter) {
+ for (int iter = 0; iter < MAX_SB_SIZE && !HasFatalFailure(); ++iter) {
const int pre_stride = iter;
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
pre[i] = (1 << params_.bit_depth) - 1;
wsrc[i] = ((1 << params_.bit_depth) - 1) * kMaskMax * kMaskMax;
mask[i] = kMaskMax * kMaskMax;
}
unsigned int ref_sse, tst_sse;
- const unsigned int ref_res = params_.ref_func(CONVERT_TO_BYTEPTR(pre),
- pre_stride,
- wsrc, mask, &ref_sse);
+ const unsigned int ref_res = params_.ref_func(
+ CONVERT_TO_BYTEPTR(pre), pre_stride, wsrc, mask, &ref_sse);
unsigned int tst_res;
- ASM_REGISTER_STATE_CHECK(
- tst_res = params_.tst_func(CONVERT_TO_BYTEPTR(pre), pre_stride,
- wsrc, mask, &tst_sse));
+ ASM_REGISTER_STATE_CHECK(tst_res = params_.tst_func(CONVERT_TO_BYTEPTR(pre),
+ pre_stride, wsrc, mask,
+ &tst_sse));
ASSERT_EQ(ref_res, tst_res);
ASSERT_EQ(ref_sse, tst_sse);
@@ -205,18 +203,18 @@
vpx_highbd_obmc_variance16x32_sse4_1, 8),
TestFuncs(vpx_highbd_obmc_variance16x16_c,
vpx_highbd_obmc_variance16x16_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance16x8_c,
- vpx_highbd_obmc_variance16x8_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance8x16_c,
- vpx_highbd_obmc_variance8x16_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance8x8_c,
- vpx_highbd_obmc_variance8x8_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance8x4_c,
- vpx_highbd_obmc_variance8x4_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance4x8_c,
- vpx_highbd_obmc_variance4x8_sse4_1, 8),
- TestFuncs(vpx_highbd_obmc_variance4x4_c,
- vpx_highbd_obmc_variance4x4_sse4_1, 8),
+ TestFuncs(vpx_highbd_obmc_variance16x8_c, vpx_highbd_obmc_variance16x8_sse4_1,
+ 8),
+ TestFuncs(vpx_highbd_obmc_variance8x16_c, vpx_highbd_obmc_variance8x16_sse4_1,
+ 8),
+ TestFuncs(vpx_highbd_obmc_variance8x8_c, vpx_highbd_obmc_variance8x8_sse4_1,
+ 8),
+ TestFuncs(vpx_highbd_obmc_variance8x4_c, vpx_highbd_obmc_variance8x4_sse4_1,
+ 8),
+ TestFuncs(vpx_highbd_obmc_variance4x8_c, vpx_highbd_obmc_variance4x8_sse4_1,
+ 8),
+ TestFuncs(vpx_highbd_obmc_variance4x4_c, vpx_highbd_obmc_variance4x4_sse4_1,
+ 8),
#if CONFIG_EXT_PARTITION
TestFuncs(vpx_highbd_10_obmc_variance128x128_c,
vpx_highbd_10_obmc_variance128x128_sse4_1, 10),
diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index 461fa9c..439888d 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -29,10 +29,8 @@
namespace {
typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc,
- InvTxfmFunc,
- InvTxfmFunc,
- TX_SIZE, int> PartialInvTxfmParam;
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+ PartialInvTxfmParam;
const int kMaxNumCoeffs = 1024;
class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
public:
@@ -41,7 +39,7 @@
ftxfm_ = GET_PARAM(0);
full_itxfm_ = GET_PARAM(1);
partial_itxfm_ = GET_PARAM(2);
- tx_size_ = GET_PARAM(3);
+ tx_size_ = GET_PARAM(3);
last_nonzero_ = GET_PARAM(4);
}
@@ -59,21 +57,11 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
- case TX_4X4:
- size = 4;
- break;
- case TX_8X8:
- size = 8;
- break;
- case TX_16X16:
- size = 16;
- break;
- case TX_32X32:
- size = 32;
- break;
- default:
- FAIL() << "Wrong Size!";
- break;
+ case TX_4X4: size = 4; break;
+ case TX_8X8: size = 8; break;
+ case TX_16X16: size = 16; break;
+ case TX_32X32: size = 32; break;
+ default: FAIL() << "Wrong Size!"; break;
}
DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -99,11 +87,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
if (i == 0) {
- for (int j = 0; j < block_size; ++j)
- input_extreme_block[j] = 255;
+ for (int j = 0; j < block_size; ++j) input_extreme_block[j] = 255;
} else if (i == 1) {
- for (int j = 0; j < block_size; ++j)
- input_extreme_block[j] = -255;
+ for (int j = 0; j < block_size; ++j) input_extreme_block[j] = -255;
} else {
for (int j = 0; j < block_size; ++j) {
input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
@@ -115,8 +101,8 @@
// quantization with maximum allowed step sizes
test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
for (int j = 1; j < last_nonzero_; ++j)
- test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]]
- = (output_ref_block[j] / 1828) * 1828;
+ test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+ (output_ref_block[j] / 1828) * 1828;
}
ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
@@ -125,8 +111,7 @@
for (int j = 0; j < block_size; ++j) {
const int diff = dst1[j] - dst2[j];
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
}
}
@@ -138,21 +123,11 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
- case TX_4X4:
- size = 4;
- break;
- case TX_8X8:
- size = 8;
- break;
- case TX_16X16:
- size = 16;
- break;
- case TX_32X32:
- size = 32;
- break;
- default:
- FAIL() << "Wrong Size!";
- break;
+ case TX_4X4: size = 4; break;
+ case TX_8X8: size = 8; break;
+ case TX_16X16: size = 16; break;
+ case TX_32X32: size = 32; break;
+ default: FAIL() << "Wrong Size!"; break;
}
DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -189,8 +164,7 @@
for (int j = 0; j < block_size; ++j) {
const int diff = dst1[j] - dst2[j];
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
}
}
@@ -201,143 +175,82 @@
INSTANTIATE_TEST_CASE_P(
C, PartialIDctTest,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_34_add_c,
- TX_32X32, 34),
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_1_add_c,
- TX_32X32, 1),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_10_add_c,
- TX_16X16, 10),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_1_add_c,
- TX_16X16, 1),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_12_add_c,
- TX_8X8, 12),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_1_add_c,
- TX_8X8, 1),
- make_tuple(&vpx_fdct4x4_c,
- &vpx_idct4x4_16_add_c,
- &vpx_idct4x4_1_add_c,
- TX_4X4, 1)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_c, TX_32X32, 34),
+ make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_c, TX_32X32, 1),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_c, TX_16X16, 10),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_c, TX_16X16, 1),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_c, TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_c, TX_8X8, 1),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_c, TX_4X4, 1)));
#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, PartialIDctTest,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_1_add_neon,
- TX_32X32, 1),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_10_add_neon,
- TX_16X16, 10),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_1_add_neon,
- TX_16X16, 1),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_12_add_neon,
- TX_8X8, 12),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_1_add_neon,
- TX_8X8, 1),
- make_tuple(&vpx_fdct4x4_c,
- &vpx_idct4x4_16_add_c,
- &vpx_idct4x4_1_add_neon,
- TX_4X4, 1)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_neon, TX_32X32, 1),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_neon, TX_16X16, 10),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_neon, TX_16X16, 1),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_neon, TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_neon, TX_8X8, 1),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, PartialIDctTest,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_34_add_sse2,
- TX_32X32, 34),
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_1_add_sse2,
- TX_32X32, 1),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_10_add_sse2,
- TX_16X16, 10),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_1_add_sse2,
- TX_16X16, 1),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_12_add_sse2,
- TX_8X8, 12),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_1_add_sse2,
- TX_8X8, 1),
- make_tuple(&vpx_fdct4x4_c,
- &vpx_idct4x4_16_add_c,
- &vpx_idct4x4_1_add_sse2,
- TX_4X4, 1)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_sse2, TX_32X32, 34),
+ make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_sse2, TX_32X32, 1),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_sse2, TX_16X16, 10),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_sse2, TX_16X16, 1),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_sse2, TX_8X8, 12),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_sse2, TX_8X8, 1),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_sse2, TX_4X4, 1)));
#endif
-#if HAVE_SSSE3 && ARCH_X86_64 && \
- !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
+ !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSSE3_64, PartialIDctTest,
- ::testing::Values(
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_12_add_ssse3,
- TX_8X8, 12)));
+ ::testing::Values(make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_ssse3, TX_8X8, 12)));
#endif
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
MSA, PartialIDctTest,
- ::testing::Values(
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_34_add_msa,
- TX_32X32, 34),
- make_tuple(&vpx_fdct32x32_c,
- &vpx_idct32x32_1024_add_c,
- &vpx_idct32x32_1_add_msa,
- TX_32X32, 1),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_10_add_msa,
- TX_16X16, 10),
- make_tuple(&vpx_fdct16x16_c,
- &vpx_idct16x16_256_add_c,
- &vpx_idct16x16_1_add_msa,
- TX_16X16, 1),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_12_add_msa,
- TX_8X8, 10),
- make_tuple(&vpx_fdct8x8_c,
- &vpx_idct8x8_64_add_c,
- &vpx_idct8x8_1_add_msa,
- TX_8X8, 1),
- make_tuple(&vpx_fdct4x4_c,
- &vpx_idct4x4_16_add_c,
- &vpx_idct4x4_1_add_msa,
- TX_4X4, 1)));
+ ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_34_add_msa, TX_32X32, 34),
+ make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
+ &vpx_idct32x32_1_add_msa, TX_32X32, 1),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_10_add_msa, TX_16X16, 10),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
+ &vpx_idct16x16_1_add_msa, TX_16X16, 1),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_12_add_msa, TX_8X8, 10),
+ make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
+ &vpx_idct8x8_1_add_msa, TX_8X8, 1),
+ make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
+ &vpx_idct4x4_1_add_msa, TX_4X4, 1)));
#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace
diff --git a/test/realtime_test.cc b/test/realtime_test.cc
index 24749e4..63f1ac3 100644
--- a/test/realtime_test.cc
+++ b/test/realtime_test.cc
@@ -23,8 +23,7 @@
: public ::libvpx_test::EncoderTest,
public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
- RealtimeTest()
- : EncoderTest(GET_PARAM(0)), frame_packets_(0) {}
+ RealtimeTest() : EncoderTest(GET_PARAM(0)), frame_packets_(0) {}
virtual ~RealtimeTest() {}
virtual void SetUp() {
diff --git a/test/reconintra_predictors_test.cc b/test/reconintra_predictors_test.cc
index 0ebfbae..7f9fc8d 100644
--- a/test/reconintra_predictors_test.cc
+++ b/test/reconintra_predictors_test.cc
@@ -132,10 +132,9 @@
void DiffPred(int testNum) const {
int i = 0;
while (i < blockSize_ * blockSize_) {
- EXPECT_EQ(predRef_[i], pred_[i])
- << "Error at position: " << i << " "
- << "Block size: " << blockSize_ << " "
- << "Test number: " << testNum;
+ EXPECT_EQ(predRef_[i], pred_[i]) << "Error at position: " << i << " "
+ << "Block size: " << blockSize_ << " "
+ << "Test number: " << testNum;
i += 1;
}
}
@@ -150,8 +149,8 @@
};
#if CONFIG_VP9_HIGHBITDEPTH
-class VP10HbdIntraPredOptimzTest :
- public ::testing::TestWithParam<HbdPredParams> {
+class VP10HbdIntraPredOptimzTest
+ : public ::testing::TestWithParam<HbdPredParams> {
public:
virtual ~VP10HbdIntraPredOptimzTest() {}
virtual void SetUp() {
@@ -227,11 +226,10 @@
void DiffPred(int testNum) const {
int i = 0;
while (i < blockSize_ * blockSize_) {
- EXPECT_EQ(predRef_[i], pred_[i])
- << "Error at position: " << i << " "
- << "Block size: " << blockSize_ << " "
- << "Bit depth: " << bd_ << " "
- << "Test number: " << testNum;
+ EXPECT_EQ(predRef_[i], pred_[i]) << "Error at position: " << i << " "
+ << "Block size: " << blockSize_ << " "
+ << "Bit depth: " << bd_ << " "
+ << "Test number: " << testNum;
i += 1;
}
}
@@ -247,33 +245,21 @@
};
#endif // CONFIG_VP9_HIGHBITDEPTH
-TEST_P(VP10IntraPredOptimzTest, BitExactCheck) {
- RunTest();
-}
+TEST_P(VP10IntraPredOptimzTest, BitExactCheck) { RunTest(); }
#if PREDICTORS_SPEED_TEST
-TEST_P(VP10IntraPredOptimzTest, SpeedCheckC) {
- RunSpeedTestC();
-}
+TEST_P(VP10IntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
-TEST_P(VP10IntraPredOptimzTest, SpeedCheckSSE) {
- RunSpeedTestSSE();
-}
+TEST_P(VP10IntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
#endif
#if CONFIG_VP9_HIGHBITDEPTH
-TEST_P(VP10HbdIntraPredOptimzTest, BitExactCheck) {
- RunTest();
-}
+TEST_P(VP10HbdIntraPredOptimzTest, BitExactCheck) { RunTest(); }
#if PREDICTORS_SPEED_TEST
-TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckC) {
- RunSpeedTestC();
-}
+TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckC) { RunSpeedTestC(); }
-TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckSSE) {
- RunSpeedTestSSE();
-}
+TEST_P(VP10HbdIntraPredOptimzTest, SpeedCheckSSE) { RunSpeedTestSSE(); }
#endif // PREDICTORS_SPEED_TEST
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -282,10 +268,8 @@
const PredFuncMode kPredFuncMdArray[] = {
make_tuple(vp10_dc_filter_predictor_c, vp10_dc_filter_predictor_sse4_1,
DC_PRED),
- make_tuple(vp10_v_filter_predictor_c, vp10_v_filter_predictor_sse4_1,
- V_PRED),
- make_tuple(vp10_h_filter_predictor_c, vp10_h_filter_predictor_sse4_1,
- H_PRED),
+ make_tuple(vp10_v_filter_predictor_c, vp10_v_filter_predictor_sse4_1, V_PRED),
+ make_tuple(vp10_h_filter_predictor_c, vp10_h_filter_predictor_sse4_1, H_PRED),
make_tuple(vp10_d45_filter_predictor_c, vp10_d45_filter_predictor_sse4_1,
D45_PRED),
make_tuple(vp10_d135_filter_predictor_c, vp10_d135_filter_predictor_sse4_1,
@@ -302,13 +286,12 @@
TM_PRED),
};
-const int kBlkSize[] = {4, 8, 16, 32};
+const int kBlkSize[] = { 4, 8, 16, 32 };
INSTANTIATE_TEST_CASE_P(
SSE4_1, VP10IntraPredOptimzTest,
- ::testing::Combine(
- ::testing::ValuesIn(kPredFuncMdArray),
- ::testing::ValuesIn(kBlkSize)));
+ ::testing::Combine(::testing::ValuesIn(kPredFuncMdArray),
+ ::testing::ValuesIn(kBlkSize)));
#if CONFIG_VP9_HIGHBITDEPTH
const HbdPredFuncMode kHbdPredFuncMdArray[] = {
@@ -334,14 +317,13 @@
vp10_highbd_tm_filter_predictor_sse4_1, TM_PRED),
};
-const int kBd[] = {10, 12};
+const int kBd[] = { 10, 12 };
INSTANTIATE_TEST_CASE_P(
SSE4_1, VP10HbdIntraPredOptimzTest,
- ::testing::Combine(
- ::testing::ValuesIn(kHbdPredFuncMdArray),
- ::testing::ValuesIn(kBlkSize),
- ::testing::ValuesIn(kBd)));
+ ::testing::Combine(::testing::ValuesIn(kHbdPredFuncMdArray),
+ ::testing::ValuesIn(kBlkSize),
+ ::testing::ValuesIn(kBd)));
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/register_state_check.h b/test/register_state_check.h
index d6540f8..a6744b7 100644
--- a/test/register_state_check.h
+++ b/test/register_state_check.h
@@ -36,7 +36,7 @@
#include <windows.h>
#include <winnt.h>
-inline bool operator==(const M128A& lhs, const M128A& rhs) {
+inline bool operator==(const M128A &lhs, const M128A &rhs) {
return (lhs.Low == rhs.Low && lhs.High == rhs.High);
}
@@ -51,7 +51,7 @@
~RegisterStateCheck() { EXPECT_TRUE(Check()); }
private:
- static bool StoreRegisters(CONTEXT* const context) {
+ static bool StoreRegisters(CONTEXT *const context) {
const HANDLE this_thread = GetCurrentThread();
EXPECT_TRUE(this_thread != NULL);
context->ContextFlags = CONTEXT_FLOATING_POINT;
@@ -66,8 +66,8 @@
CONTEXT post_context;
if (!StoreRegisters(&post_context)) return false;
- const M128A* xmm_pre = &pre_context_.Xmm6;
- const M128A* xmm_post = &post_context.Xmm6;
+ const M128A *xmm_pre = &pre_context_.Xmm6;
+ const M128A *xmm_post = &post_context.Xmm6;
for (int i = 6; i <= 15; ++i) {
EXPECT_EQ(*xmm_pre, *xmm_post) << "xmm" << i << " has been modified!";
++xmm_pre;
@@ -80,15 +80,16 @@
CONTEXT pre_context_;
};
-#define ASM_REGISTER_STATE_CHECK(statement) do { \
- libvpx_test::RegisterStateCheck reg_check; \
- statement; \
-} while (false)
+#define ASM_REGISTER_STATE_CHECK(statement) \
+ do { \
+ libvpx_test::RegisterStateCheck reg_check; \
+ statement; \
+ } while (false)
} // namespace libvpx_test
-#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && defined(CONFIG_VP10) \
- && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP10
+#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && \
+ defined(CONFIG_VP10) && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP10
extern "C" {
// Save the d8-d15 registers into store.
@@ -117,8 +118,8 @@
int64_t post_store[8];
vpx_push_neon(post_store);
for (int i = 0; i < 8; ++i) {
- EXPECT_EQ(pre_store_[i], post_store[i]) << "d"
- << i + 8 << " has been modified";
+ EXPECT_EQ(pre_store_[i], post_store[i]) << "d" << i + 8
+ << " has been modified";
}
return !testing::Test::HasNonfatalFailure();
}
@@ -127,10 +128,11 @@
int64_t pre_store_[8];
};
-#define ASM_REGISTER_STATE_CHECK(statement) do { \
- libvpx_test::RegisterStateCheck reg_check; \
- statement; \
-} while (false)
+#define ASM_REGISTER_STATE_CHECK(statement) \
+ do { \
+ libvpx_test::RegisterStateCheck reg_check; \
+ statement; \
+ } while (false)
} // namespace libvpx_test
@@ -175,10 +177,11 @@
uint16_t pre_fpu_env_[14];
};
-#define API_REGISTER_STATE_CHECK(statement) do { \
- libvpx_test::RegisterStateCheckMMX reg_check; \
- ASM_REGISTER_STATE_CHECK(statement); \
-} while (false)
+#define API_REGISTER_STATE_CHECK(statement) \
+ do { \
+ libvpx_test::RegisterStateCheckMMX reg_check; \
+ ASM_REGISTER_STATE_CHECK(statement); \
+ } while (false)
} // namespace libvpx_test
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 46a3b6e..3738a09 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -44,9 +44,9 @@
header[1] = 'K';
header[2] = 'I';
header[3] = 'F';
- mem_put_le16(header + 4, 0); /* version */
- mem_put_le16(header + 6, 32); /* headersize */
- mem_put_le32(header + 8, 0x30395056); /* fourcc (vp9) */
+ mem_put_le16(header + 4, 0); /* version */
+ mem_put_le16(header + 6, 32); /* headersize */
+ mem_put_le32(header + 8, 0x30395056); /* fourcc (vp9) */
mem_put_le16(header + 12, cfg->g_w); /* width */
mem_put_le16(header + 14, cfg->g_h); /* height */
mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */
@@ -68,8 +68,7 @@
char header[12];
vpx_codec_pts_t pts;
- if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
- return;
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return;
pts = pkt->data.frame.pts;
mem_put_le32(header, static_cast<unsigned int>(pkt->data.frame.sz));
@@ -92,12 +91,9 @@
unsigned int h;
};
-void ScaleForFrameNumber(unsigned int frame,
- unsigned int initial_w,
- unsigned int initial_h,
- unsigned int *w,
- unsigned int *h,
- int flag_codec) {
+void ScaleForFrameNumber(unsigned int frame, unsigned int initial_w,
+ unsigned int initial_h, unsigned int *w,
+ unsigned int *h, int flag_codec) {
if (frame < 10) {
*w = initial_w;
*h = initial_h;
@@ -219,7 +215,7 @@
return;
}
if (frame < 250) {
- *w = initial_w / 2;
+ *w = initial_w / 2;
*h = initial_h / 2;
return;
}
@@ -268,8 +264,9 @@
}
};
-class ResizeTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
+class ResizeTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
ResizeTest() : EncoderTest(GET_PARAM(0)) {}
@@ -285,7 +282,7 @@
frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
}
- std::vector< FrameInfo > frame_info_list_;
+ std::vector<FrameInfo> frame_info_list_;
};
TEST_P(ResizeTest, TestExternalResizeWorks) {
@@ -299,12 +296,12 @@
const unsigned int frame = static_cast<unsigned>(info->pts);
unsigned int expected_w;
unsigned int expected_h;
- ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight,
- &expected_w, &expected_h, 0);
- EXPECT_EQ(expected_w, info->w)
- << "Frame " << frame << " had unexpected width";
- EXPECT_EQ(expected_h, info->h)
- << "Frame " << frame << " had unexpected height";
+ ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, &expected_w,
+ &expected_h, 0);
+ EXPECT_EQ(expected_w, info->w) << "Frame " << frame
+ << " had unexpected width";
+ EXPECT_EQ(expected_h, info->h) << "Frame " << frame
+ << " had unexpected height";
}
}
@@ -315,10 +312,7 @@
protected:
#if WRITE_COMPRESSED_STREAM
ResizeInternalTest()
- : ResizeTest(),
- frame0_psnr_(0.0),
- outfile_(NULL),
- out_frames_(0) {}
+ : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {}
#else
ResizeInternalTest() : ResizeTest(), frame0_psnr_(0.0) {}
#endif
@@ -347,30 +341,29 @@
if (change_config_) {
int new_q = 60;
if (video->frame() == 0) {
- struct vpx_scaling_mode mode = {VP8E_ONETWO, VP8E_ONETWO};
+ struct vpx_scaling_mode mode = { VP8E_ONETWO, VP8E_ONETWO };
encoder->Control(VP8E_SET_SCALEMODE, &mode);
}
if (video->frame() == 1) {
- struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+ struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
encoder->Control(VP8E_SET_SCALEMODE, &mode);
cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = new_q;
encoder->Config(&cfg_);
}
} else {
if (video->frame() == kStepDownFrame) {
- struct vpx_scaling_mode mode = {VP8E_FOURFIVE, VP8E_THREEFIVE};
+ struct vpx_scaling_mode mode = { VP8E_FOURFIVE, VP8E_THREEFIVE };
encoder->Control(VP8E_SET_SCALEMODE, &mode);
}
if (video->frame() == kStepUpFrame) {
- struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL};
+ struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL };
encoder->Control(VP8E_SET_SCALEMODE, &mode);
}
}
}
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (frame0_psnr_ == 0.)
- frame0_psnr_ = pkt->data.psnr.psnr[0];
+ if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0];
EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
}
@@ -379,8 +372,7 @@
++out_frames_;
// Write initial file header if first frame.
- if (pkt->data.frame.pts == 0)
- write_ivf_file_header(&cfg_, 0, outfile_);
+ if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_);
// Write frame header and data.
write_ivf_frame_header(pkt, outfile_);
@@ -434,8 +426,9 @@
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
}
-class ResizeRealtimeTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+class ResizeRealtimeTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
ResizeRealtimeTest() : EncoderTest(GET_PARAM(0)) {}
virtual ~ResizeRealtimeTest() {}
@@ -465,16 +458,13 @@
frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h));
}
- virtual void MismatchHook(const vpx_image_t *img1,
- const vpx_image_t *img2) {
+ virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) {
double mismatch_psnr = compute_psnr(img1, img2);
mismatch_psnr_ += mismatch_psnr;
++mismatch_nframes_;
}
- unsigned int GetMismatchFrames() {
- return mismatch_nframes_;
- }
+ unsigned int GetMismatchFrames() { return mismatch_nframes_; }
void DefaultConfig() {
cfg_.rc_buf_initial_sz = 500;
@@ -491,14 +481,14 @@
// Enable dropped frames.
cfg_.rc_dropframe_thresh = 1;
// Enable error_resilience mode.
- cfg_.g_error_resilient = 1;
+ cfg_.g_error_resilient = 1;
// Enable dynamic resizing.
cfg_.rc_resize_allowed = 1;
// Run at low bitrate.
cfg_.rc_target_bitrate = 200;
}
- std::vector< FrameInfo > frame_info_list_;
+ std::vector<FrameInfo> frame_info_list_;
int set_cpu_used_;
bool change_bitrate_;
double mismatch_psnr_;
@@ -521,12 +511,12 @@
const unsigned int frame = static_cast<unsigned>(info->pts);
unsigned int expected_w;
unsigned int expected_h;
- ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight,
- &expected_w, &expected_h, 1);
- EXPECT_EQ(expected_w, info->w)
- << "Frame " << frame << " had unexpected width";
- EXPECT_EQ(expected_h, info->h)
- << "Frame " << frame << " had unexpected height";
+ ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, &expected_w,
+ &expected_h, 1);
+ EXPECT_EQ(expected_w, info->w) << "Frame " << frame
+ << " had unexpected width";
+ EXPECT_EQ(expected_h, info->h) << "Frame " << frame
+ << " had unexpected height";
EXPECT_EQ(static_cast<unsigned int>(0), GetMismatchFrames());
}
}
@@ -618,10 +608,8 @@
}
vpx_img_fmt_t CspForFrameNumber(int frame) {
- if (frame < 10)
- return VPX_IMG_FMT_I420;
- if (frame < 20)
- return VPX_IMG_FMT_I444;
+ if (frame < 10) return VPX_IMG_FMT_I420;
+ if (frame < 20) return VPX_IMG_FMT_I444;
return VPX_IMG_FMT_I420;
}
@@ -629,10 +617,7 @@
protected:
#if WRITE_COMPRESSED_STREAM
ResizeCspTest()
- : ResizeTest(),
- frame0_psnr_(0.0),
- outfile_(NULL),
- out_frames_(0) {}
+ : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {}
#else
ResizeCspTest() : ResizeTest(), frame0_psnr_(0.0) {}
#endif
@@ -671,8 +656,7 @@
}
virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
- if (frame0_psnr_ == 0.)
- frame0_psnr_ = pkt->data.psnr.psnr[0];
+ if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0];
EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
}
@@ -681,8 +665,7 @@
++out_frames_;
// Write initial file header if first frame.
- if (pkt->data.frame.pts == 0)
- write_ivf_file_header(&cfg_, 0, outfile_);
+ if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_);
// Write frame header and data.
write_ivf_frame_header(pkt, outfile_);
@@ -723,12 +706,12 @@
}
VP10_INSTANTIATE_TEST_CASE(ResizeTest,
- ::testing::Values(::libvpx_test::kRealTime));
+ ::testing::Values(::libvpx_test::kRealTime));
VP10_INSTANTIATE_TEST_CASE(ResizeInternalTest,
- ::testing::Values(::libvpx_test::kOnePassBest));
+ ::testing::Values(::libvpx_test::kOnePassBest));
VP10_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
- ::testing::Values(::libvpx_test::kRealTime),
- ::testing::Range(5, 9));
+ ::testing::Values(::libvpx_test::kRealTime),
+ ::testing::Range(5, 9));
VP10_INSTANTIATE_TEST_CASE(ResizeCspTest,
- ::testing::Values(::libvpx_test::kRealTime));
+ ::testing::Values(::libvpx_test::kRealTime));
} // namespace
diff --git a/test/sad_test.cc b/test/sad_test.cc
index 36f777d..1e6295d 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <string.h>
#include <limits.h>
#include <stdio.h>
@@ -25,23 +24,17 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
-typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride);
+typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride);
typedef std::tr1::tuple<int, int, SadMxNFunc, int> SadMxNParam;
-typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *ref_ptr,
- int ref_stride,
+typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
const uint8_t *second_pred);
typedef std::tr1::tuple<int, int, SadMxNAvgFunc, int> SadMxNAvgParam;
-typedef void (*SadMxNx4Func)(const uint8_t *src_ptr,
- int src_stride,
- const uint8_t *const ref_ptr[],
- int ref_stride,
+typedef void (*SadMxNx4Func)(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *const ref_ptr[], int ref_stride,
uint32_t *sad_array);
typedef std::tr1::tuple<int, int, SadMxNx4Func, int> SadMxNx4Param;
@@ -50,22 +43,22 @@
namespace {
class SADTestBase : public ::testing::Test {
public:
- SADTestBase(int width, int height, int bit_depth) :
- width_(width), height_(height), bd_(bit_depth) {}
+ SADTestBase(int width, int height, int bit_depth)
+ : width_(width), height_(height), bd_(bit_depth) {}
static void SetUpTestCase() {
- source_data8_ = reinterpret_cast<uint8_t*>(
+ source_data8_ = reinterpret_cast<uint8_t *>(
vpx_memalign(kDataAlignment, kDataBlockSize));
- reference_data8_ = reinterpret_cast<uint8_t*>(
+ reference_data8_ = reinterpret_cast<uint8_t *>(
vpx_memalign(kDataAlignment, kDataBufferSize));
- second_pred8_ = reinterpret_cast<uint8_t*>(
- vpx_memalign(kDataAlignment, 128*128));
- source_data16_ = reinterpret_cast<uint16_t*>(
- vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t)));
- reference_data16_ = reinterpret_cast<uint16_t*>(
- vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t)));
- second_pred16_ = reinterpret_cast<uint16_t*>(
- vpx_memalign(kDataAlignment, 128*128*sizeof(uint16_t)));
+ second_pred8_ =
+ reinterpret_cast<uint8_t *>(vpx_memalign(kDataAlignment, 128 * 128));
+ source_data16_ = reinterpret_cast<uint16_t *>(
+ vpx_memalign(kDataAlignment, kDataBlockSize * sizeof(uint16_t)));
+ reference_data16_ = reinterpret_cast<uint16_t *>(
+ vpx_memalign(kDataAlignment, kDataBufferSize * sizeof(uint16_t)));
+ second_pred16_ = reinterpret_cast<uint16_t *>(
+ vpx_memalign(kDataAlignment, 128 * 128 * sizeof(uint16_t)));
}
static void TearDownTestCase() {
@@ -83,9 +76,7 @@
second_pred16_ = NULL;
}
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
protected:
// Handle up to 4 128x128 blocks, with stride up to 256
@@ -128,12 +119,12 @@
// difference between two pixels in the same relative location; accumulate.
unsigned int ReferenceSAD(int block_idx) {
unsigned int sad = 0;
- const uint8_t *const reference8 = GetReference(block_idx);
- const uint8_t *const source8 = source_data_;
+ const uint8_t *const reference8 = GetReference(block_idx);
+ const uint8_t *const source8 = source_data_;
#if CONFIG_VP9_HIGHBITDEPTH
- const uint16_t *const reference16 =
- CONVERT_TO_SHORTPTR(GetReference(block_idx));
- const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
+ const uint16_t *const reference16 =
+ CONVERT_TO_SHORTPTR(GetReference(block_idx));
+ const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int h = 0; h < height_; ++h) {
for (int w = 0; w < width_; ++w) {
@@ -169,13 +160,13 @@
for (int w = 0; w < width_; ++w) {
if (!use_high_bit_depth_) {
const int tmp = second_pred8[h * width_ + w] +
- reference8[h * reference_stride_ + w];
+ reference8[h * reference_stride_ + w];
const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
sad += abs(source8[h * source_stride_ + w] - comp_pred);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
const int tmp = second_pred16[h * width_ + w] +
- reference16[h * reference_stride_ + w];
+ reference16[h * reference_stride_ + w];
const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
sad += abs(source16[h * source_stride_ + w] - comp_pred);
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -239,20 +230,18 @@
ACMRandom rnd_;
};
-class SADx4Test
- : public SADTestBase,
- public ::testing::WithParamInterface<SadMxNx4Param> {
+class SADx4Test : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNx4Param> {
public:
SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
protected:
void SADs(unsigned int *results) {
- const uint8_t *references[] = {GetReference(0), GetReference(1),
- GetReference(2), GetReference(3)};
+ const uint8_t *references[] = { GetReference(0), GetReference(1),
+ GetReference(2), GetReference(3) };
- ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_,
- references, reference_stride_,
- results));
+ ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(
+ source_data_, source_stride_, references, reference_stride_, results));
}
void CheckSADs() {
@@ -267,9 +256,8 @@
}
};
-class SADTest
- : public SADTestBase,
- public ::testing::WithParamInterface<SadMxNParam> {
+class SADTest : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNParam> {
public:
SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
@@ -291,9 +279,8 @@
}
};
-class SADavgTest
- : public SADTestBase,
- public ::testing::WithParamInterface<SadMxNAvgParam> {
+class SADavgTest : public SADTestBase,
+ public ::testing::WithParamInterface<SadMxNAvgParam> {
public:
SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {}
@@ -469,7 +456,7 @@
}
TEST_P(SADx4Test, SrcAlignedByWidth) {
- uint8_t * tmp_source_data = source_data_;
+ uint8_t *tmp_source_data = source_data_;
source_data_ += width_;
FillRandom(source_data_, source_stride_);
FillRandom(GetReference(0), reference_stride_);
diff --git a/test/subtract_test.cc b/test/subtract_test.cc
index 2e2f162..498b6fa 100644
--- a/test/subtract_test.cc
+++ b/test/subtract_test.cc
@@ -24,18 +24,16 @@
#define USE_SPEED_TEST (0)
-typedef void (*SubtractFunc)(int rows, int cols,
- int16_t *diff_ptr, ptrdiff_t diff_stride,
- const uint8_t *src_ptr, ptrdiff_t src_stride,
- const uint8_t *pred_ptr, ptrdiff_t pred_stride);
+typedef void (*SubtractFunc)(int rows, int cols, int16_t *diff_ptr,
+ ptrdiff_t diff_stride, const uint8_t *src_ptr,
+ ptrdiff_t src_stride, const uint8_t *pred_ptr,
+ ptrdiff_t pred_stride);
namespace {
class VP9SubtractBlockTest : public ::testing::TestWithParam<SubtractFunc> {
public:
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
};
using libvpx_test::ACMRandom;
@@ -52,7 +50,7 @@
vpx_memalign(16, sizeof(*diff) * block_width * block_height * 2));
uint8_t *pred = reinterpret_cast<uint8_t *>(
vpx_memalign(16, block_width * block_height * 2));
- uint8_t *src = reinterpret_cast<uint8_t *>(
+ uint8_t *src = reinterpret_cast<uint8_t *>(
vpx_memalign(16, block_width * block_height * 2));
for (int n = 0; n < 100; n++) {
@@ -63,29 +61,26 @@
}
}
- GetParam()(block_height, block_width, diff, block_width,
- src, block_width, pred, block_width);
+ GetParam()(block_height, block_width, diff, block_width, src, block_width,
+ pred, block_width);
for (int r = 0; r < block_height; ++r) {
for (int c = 0; c < block_width; ++c) {
EXPECT_EQ(diff[r * block_width + c],
- (src[r * block_width + c] -
- pred[r * block_width + c])) << "r = " << r
- << ", c = " << c
- << ", bs = " << bsize;
+ (src[r * block_width + c] - pred[r * block_width + c]))
+ << "r = " << r << ", c = " << c << ", bs = " << bsize;
}
}
- GetParam()(block_height, block_width, diff, block_width * 2,
- src, block_width * 2, pred, block_width * 2);
+ GetParam()(block_height, block_width, diff, block_width * 2, src,
+ block_width * 2, pred, block_width * 2);
for (int r = 0; r < block_height; ++r) {
for (int c = 0; c < block_width; ++c) {
- EXPECT_EQ(diff[r * block_width * 2 + c],
- (src[r * block_width * 2 + c] -
- pred[r * block_width * 2 + c])) << "r = " << r
- << ", c = " << c
- << ", bs = " << bsize;
+ EXPECT_EQ(
+ diff[r * block_width * 2 + c],
+ (src[r * block_width * 2 + c] - pred[r * block_width * 2 + c]))
+ << "r = " << r << ", c = " << c << ", bs = " << bsize;
}
}
}
@@ -111,11 +106,10 @@
::testing::Values(vpx_subtract_block_msa));
#endif
-typedef void (*HBDSubtractFunc)(int rows, int cols,
- int16_t *diff_ptr, ptrdiff_t diff_stride,
- const uint8_t *src_ptr, ptrdiff_t src_stride,
- const uint8_t *pred_ptr, ptrdiff_t pred_stride,
- int bd);
+typedef void (*HBDSubtractFunc)(int rows, int cols, int16_t *diff_ptr,
+ ptrdiff_t diff_stride, const uint8_t *src_ptr,
+ ptrdiff_t src_stride, const uint8_t *pred_ptr,
+ ptrdiff_t pred_stride, int bd);
using ::std::tr1::get;
using ::std::tr1::make_tuple;
@@ -179,8 +173,8 @@
}
for (i = 0; i < test_num; ++i) {
- func_(block_height_, block_width_, diff_, block_width_,
- src_, block_width_, pred_, block_width_, bit_depth_);
+ func_(block_height_, block_width_, diff_, block_width_, src_, block_width_,
+ pred_, block_width_, bit_depth_);
}
}
@@ -197,8 +191,8 @@
CONVERT_TO_SHORTPTR(pred_)[j] = rnd_.Rand16() & mask;
}
- func_(block_height_, block_width_, diff_, block_width_,
- src_, block_width_, pred_, block_width_, bit_depth_);
+ func_(block_height_, block_width_, diff_, block_width_, src_, block_width_,
+ pred_, block_width_, bit_depth_);
for (int r = 0; r < block_height_; ++r) {
for (int c = 0; c < block_width_; ++c) {
@@ -211,50 +205,47 @@
}
}
-TEST_P(VP10HBDSubtractBlockTest, CheckResult) {
- CheckResult();
-}
+TEST_P(VP10HBDSubtractBlockTest, CheckResult) { CheckResult(); }
#if USE_SPEED_TEST
-TEST_P(VP10HBDSubtractBlockTest, CheckSpeed) {
- RunForSpeed();
-}
+TEST_P(VP10HBDSubtractBlockTest, CheckSpeed) { RunForSpeed(); }
#endif // USE_SPEED_TEST
#if HAVE_SSE2
-INSTANTIATE_TEST_CASE_P(SSE2, VP10HBDSubtractBlockTest, ::testing::Values(
- make_tuple(4, 4, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(4, 4, 12, vpx_highbd_subtract_block_c),
- make_tuple(4, 8, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(4, 8, 12, vpx_highbd_subtract_block_c),
- make_tuple(8, 4, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(8, 4, 12, vpx_highbd_subtract_block_c),
- make_tuple(8, 8, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(8, 8, 12, vpx_highbd_subtract_block_c),
- make_tuple(8, 16, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(8, 16, 12, vpx_highbd_subtract_block_c),
- make_tuple(16, 8, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(16, 8, 12, vpx_highbd_subtract_block_c),
- make_tuple(16, 16, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(16, 16, 12, vpx_highbd_subtract_block_c),
- make_tuple(16, 32, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(16, 32, 12, vpx_highbd_subtract_block_c),
- make_tuple(32, 16, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(32, 16, 12, vpx_highbd_subtract_block_c),
- make_tuple(32, 32, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(32, 32, 12, vpx_highbd_subtract_block_c),
- make_tuple(32, 64, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(32, 64, 12, vpx_highbd_subtract_block_c),
- make_tuple(64, 32, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(64, 32, 12, vpx_highbd_subtract_block_c),
- make_tuple(64, 64, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(64, 64, 12, vpx_highbd_subtract_block_c),
- make_tuple(64, 128, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(64, 128, 12, vpx_highbd_subtract_block_c),
- make_tuple(128, 64, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(128, 64, 12, vpx_highbd_subtract_block_c),
- make_tuple(128, 128, 12, vpx_highbd_subtract_block_sse2),
- make_tuple(128, 128, 12, vpx_highbd_subtract_block_c)));
+INSTANTIATE_TEST_CASE_P(
+ SSE2, VP10HBDSubtractBlockTest,
+ ::testing::Values(make_tuple(4, 4, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(4, 4, 12, vpx_highbd_subtract_block_c),
+ make_tuple(4, 8, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(4, 8, 12, vpx_highbd_subtract_block_c),
+ make_tuple(8, 4, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(8, 4, 12, vpx_highbd_subtract_block_c),
+ make_tuple(8, 8, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(8, 8, 12, vpx_highbd_subtract_block_c),
+ make_tuple(8, 16, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(8, 16, 12, vpx_highbd_subtract_block_c),
+ make_tuple(16, 8, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(16, 8, 12, vpx_highbd_subtract_block_c),
+ make_tuple(16, 16, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(16, 16, 12, vpx_highbd_subtract_block_c),
+ make_tuple(16, 32, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(16, 32, 12, vpx_highbd_subtract_block_c),
+ make_tuple(32, 16, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(32, 16, 12, vpx_highbd_subtract_block_c),
+ make_tuple(32, 32, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(32, 32, 12, vpx_highbd_subtract_block_c),
+ make_tuple(32, 64, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(32, 64, 12, vpx_highbd_subtract_block_c),
+ make_tuple(64, 32, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(64, 32, 12, vpx_highbd_subtract_block_c),
+ make_tuple(64, 64, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(64, 64, 12, vpx_highbd_subtract_block_c),
+ make_tuple(64, 128, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(64, 128, 12, vpx_highbd_subtract_block_c),
+ make_tuple(128, 64, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(128, 64, 12, vpx_highbd_subtract_block_c),
+ make_tuple(128, 128, 12, vpx_highbd_subtract_block_sse2),
+ make_tuple(128, 128, 12, vpx_highbd_subtract_block_c)));
#endif // HAVE_SSE2
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/sum_squares_test.cc b/test/sum_squares_test.cc
index 9adb86e..4991a04 100644
--- a/test/sum_squares_test.cc
+++ b/test/sum_squares_test.cc
@@ -34,13 +34,10 @@
typedef uint64_t (*SSI16Func)(const int16_t *src, int stride, int size);
typedef libvpx_test::FuncParam<SSI16Func> TestFuncs;
-class SumSquaresTest :
- public ::testing::TestWithParam<TestFuncs> {
+class SumSquaresTest : public ::testing::TestWithParam<TestFuncs> {
public:
virtual ~SumSquaresTest() {}
- virtual void SetUp() {
- params_ = this->GetParam();
- }
+ virtual void SetUp() { params_ = this->GetParam(); }
virtual void TearDown() { libvpx_test::ClearSystemState(); }
@@ -50,23 +47,23 @@
TEST_P(SumSquaresTest, OperationCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, int16_t, src[256*256]);
+ DECLARE_ALIGNED(16, int16_t, src[256 * 256]);
int failed = 0;
- const int msb = 11; // Up to 12 bit input
- const int limit = 1 << (msb+1);
+ const int msb = 11; // Up to 12 bit input
+ const int limit = 1 << (msb + 1);
for (int k = 0; k < kNumIterations; k++) {
- int size = 4 << rnd(6); // Up to 128x128
- int stride = 4 << rnd(7); // Up to 256 stride
- while (stride < size) { // Make sure it's valid
+ int size = 4 << rnd(6); // Up to 128x128
+ int stride = 4 << rnd(7); // Up to 256 stride
+ while (stride < size) { // Make sure it's valid
stride = 4 << rnd(7);
}
- for (int ii = 0 ; ii < size; ii++) {
+ for (int ii = 0; ii < size; ii++) {
for (int jj = 0; jj < size; jj++) {
- src[ii*stride+jj] = rnd(2) ? rnd(limit) : -rnd(limit);
+ src[ii * stride + jj] = rnd(2) ? rnd(limit) : -rnd(limit);
}
}
@@ -77,32 +74,32 @@
if (!failed) {
failed = res_ref != res_tst;
EXPECT_EQ(res_ref, res_tst)
- << "Error: Sum Squares Test"
- << " C output does not match optimized output.";
+ << "Error: Sum Squares Test"
+ << " C output does not match optimized output.";
}
}
}
TEST_P(SumSquaresTest, ExtremeValues) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- DECLARE_ALIGNED(16, int16_t, src[256*256]);
+ DECLARE_ALIGNED(16, int16_t, src[256 * 256]);
int failed = 0;
- const int msb = 11; // Up to 12 bit input
- const int limit = 1 << (msb+1);
+ const int msb = 11; // Up to 12 bit input
+ const int limit = 1 << (msb + 1);
for (int k = 0; k < kNumIterations; k++) {
- int size = 4 << rnd(6); // Up to 128x128
- int stride = 4 << rnd(7); // Up to 256 stride
- while (stride < size) { // Make sure it's valid
+ int size = 4 << rnd(6); // Up to 128x128
+ int stride = 4 << rnd(7); // Up to 256 stride
+ while (stride < size) { // Make sure it's valid
stride = 4 << rnd(7);
}
- int val = rnd(2) ? limit-1 : -(limit-1);
- for (int ii = 0 ; ii < size; ii++) {
+ int val = rnd(2) ? limit - 1 : -(limit - 1);
+ for (int ii = 0; ii < size; ii++) {
for (int jj = 0; jj < size; jj++) {
- src[ii*stride+jj] = val;
+ src[ii * stride + jj] = val;
}
}
@@ -113,8 +110,8 @@
if (!failed) {
failed = res_ref != res_tst;
EXPECT_EQ(res_ref, res_tst)
- << "Error: Sum Squares Test"
- << " C output does not match optimized output.";
+ << "Error: Sum Squares Test"
+ << " C output does not match optimized output.";
}
}
}
@@ -123,8 +120,8 @@
INSTANTIATE_TEST_CASE_P(
SSE2, SumSquaresTest,
- ::testing::Values(
- TestFuncs(&vpx_sum_squares_2d_i16_c, &vpx_sum_squares_2d_i16_sse2)));
+ ::testing::Values(TestFuncs(&vpx_sum_squares_2d_i16_c,
+ &vpx_sum_squares_2d_i16_sse2)));
#endif // HAVE_SSE2
@@ -144,8 +141,8 @@
TEST_P(SumSquares1DTest, RandomValues) {
DECLARE_ALIGNED(16, int16_t, src[kMaxSize * kMaxSize]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < kMaxSize * kMaxSize ; ++i)
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < kMaxSize * kMaxSize; ++i)
src[i] = rng_(kInt13Max * 2 + 1) - kInt13Max;
const int N = rng_(2) ? rng_(kMaxSize * kMaxSize + 1 - kMaxSize) + kMaxSize
@@ -162,13 +159,11 @@
TEST_P(SumSquares1DTest, ExtremeValues) {
DECLARE_ALIGNED(16, int16_t, src[kMaxSize * kMaxSize]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
if (rng_(2)) {
- for (int i = 0 ; i < kMaxSize * kMaxSize ; ++i)
- src[i] = kInt13Max;
+ for (int i = 0; i < kMaxSize * kMaxSize; ++i) src[i] = kInt13Max;
} else {
- for (int i = 0 ; i < kMaxSize * kMaxSize ; ++i)
- src[i] = -kInt13Max;
+ for (int i = 0; i < kMaxSize * kMaxSize; ++i) src[i] = -kInt13Max;
}
const int N = rng_(2) ? rng_(kMaxSize * kMaxSize + 1 - kMaxSize) + kMaxSize
@@ -183,10 +178,9 @@
}
#if HAVE_SSE2
-INSTANTIATE_TEST_CASE_P(
- SSE2, SumSquares1DTest,
- ::testing::Values(
- TestFuncs1D(vpx_sum_squares_i16_c, vpx_sum_squares_i16_sse2)));
+INSTANTIATE_TEST_CASE_P(SSE2, SumSquares1DTest,
+ ::testing::Values(TestFuncs1D(
+ vpx_sum_squares_i16_c, vpx_sum_squares_i16_sse2)));
#endif // HAVE_SSE2
} // namespace
diff --git a/test/superframe_test.cc b/test/superframe_test.cc
index 610c69b..7580375 100644
--- a/test/superframe_test.cc
+++ b/test/superframe_test.cc
@@ -21,14 +21,15 @@
const int kTileCols = 2;
const int kTileRows = 3;
-typedef std::tr1::tuple<libvpx_test::TestMode, int,
- int, int> SuperframeTestParam;
+typedef std::tr1::tuple<libvpx_test::TestMode, int, int, int>
+ SuperframeTestParam;
-class SuperframeTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWithParam<SuperframeTestParam> {
+class SuperframeTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWithParam<SuperframeTestParam> {
protected:
- SuperframeTest() : EncoderTest(GET_PARAM(0)), modified_buf_(NULL),
- last_sf_pts_(0) {}
+ SuperframeTest()
+ : EncoderTest(GET_PARAM(0)), modified_buf_(NULL), last_sf_pts_(0) {}
virtual ~SuperframeTest() {}
virtual void SetUp() {
@@ -44,9 +45,7 @@
n_tile_rows_ = std::tr1::get<kTileRows>(input);
}
- virtual void TearDown() {
- delete[] modified_buf_;
- }
+ virtual void TearDown() { delete[] modified_buf_; }
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
libvpx_test::Encoder *encoder) {
@@ -58,26 +57,22 @@
}
}
- virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook(
+ virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook(
const vpx_codec_cx_pkt_t *pkt) {
- if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
- return pkt;
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return pkt;
- const uint8_t *buffer = reinterpret_cast<uint8_t*>(pkt->data.frame.buf);
+ const uint8_t *buffer = reinterpret_cast<uint8_t *>(pkt->data.frame.buf);
const uint8_t marker = buffer[pkt->data.frame.sz - 1];
const int frames = (marker & 0x7) + 1;
const int mag = ((marker >> 3) & 3) + 1;
const unsigned int index_sz =
2 + mag * (frames - is_vp10_style_superframe_);
- if ((marker & 0xe0) == 0xc0 &&
- pkt->data.frame.sz >= index_sz &&
+ if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
buffer[pkt->data.frame.sz - index_sz] == marker) {
// frame is a superframe. strip off the index.
- if (modified_buf_)
- delete[] modified_buf_;
+ if (modified_buf_) delete[] modified_buf_;
modified_buf_ = new uint8_t[pkt->data.frame.sz - index_sz];
- memcpy(modified_buf_, pkt->data.frame.buf,
- pkt->data.frame.sz - index_sz);
+ memcpy(modified_buf_, pkt->data.frame.buf, pkt->data.frame.sz - index_sz);
modified_pkt_ = *pkt;
modified_pkt_.data.frame.buf = modified_buf_;
modified_pkt_.data.frame.sz -= index_sz;
@@ -88,8 +83,8 @@
}
// Make sure we do a few frames after the last SF
- abort_ |= sf_count_ > sf_count_max_ &&
- pkt->data.frame.pts - last_sf_pts_ >= 5;
+ abort_ |=
+ sf_count_ > sf_count_max_ && pkt->data.frame.pts - last_sf_pts_ >= 5;
return pkt;
}
@@ -131,16 +126,19 @@
const int tile_col_values[] = { 1, 2, 32 };
#endif
const int tile_row_values[] = { 1, 2, 32 };
-VP10_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Combine(
- ::testing::Values(::libvpx_test::kTwoPassGood),
- ::testing::Values(1),
- ::testing::ValuesIn(tile_col_values),
- ::testing::ValuesIn(tile_row_values)));
+VP10_INSTANTIATE_TEST_CASE(
+ SuperframeTest,
+ ::testing::Combine(::testing::Values(::libvpx_test::kTwoPassGood),
+ ::testing::Values(1),
+ ::testing::ValuesIn(tile_col_values),
+ ::testing::ValuesIn(tile_row_values)));
#else
#if !CONFIG_ANS
-VP10_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Combine(
- ::testing::Values(::libvpx_test::kTwoPassGood),
- ::testing::Values(1), ::testing::Values(0), ::testing::Values(0)));
+VP10_INSTANTIATE_TEST_CASE(
+ SuperframeTest,
+ ::testing::Combine(::testing::Values(::libvpx_test::kTwoPassGood),
+ ::testing::Values(1), ::testing::Values(0),
+ ::testing::Values(0)));
#endif // !CONFIG_ANS
#endif // CONFIG_EXT_TILE
} // namespace
diff --git a/test/test_intra_pred_speed.cc b/test/test_intra_pred_speed.cc
index 8928bf8..a896307 100644
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -31,9 +31,9 @@
const int kNumVp9IntraPredFuncs = 13;
const char *kVp9IntraPredNames[kNumVp9IntraPredFuncs] = {
- "DC_PRED", "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED", "H_PRED",
- "D45_PRED", "D135_PRED", "D117_PRED", "D153_PRED", "D207_PRED", "D63_PRED",
- "TM_PRED"
+ "DC_PRED", "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED",
+ "H_PRED", "D45_PRED", "D135_PRED", "D117_PRED", "D153_PRED",
+ "D207_PRED", "D63_PRED", "TM_PRED"
};
void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs,
@@ -82,18 +82,12 @@
void TestIntraPred4(VpxPredFunc const *pred_funcs) {
static const int kNumVp9IntraFuncs = 13;
static const char *const kSignatures[kNumVp9IntraFuncs] = {
- "4334156168b34ab599d9b5b30f522fe9",
- "bc4649d5ba47c7ff178d92e475960fb0",
- "8d316e5933326dcac24e1064794b5d12",
- "a27270fed024eafd762c95de85f4da51",
- "c33dff000d4256c2b8f3bf9e9bab14d2",
- "44d8cddc2ad8f79b8ed3306051722b4f",
- "eb54839b2bad6699d8946f01ec041cd0",
- "ecb0d56ae5f677ea45127ce9d5c058e4",
- "0b7936841f6813da818275944895b574",
- "9117972ef64f91a58ff73e1731c81db2",
- "c56d5e8c729e46825f46dd5d3b5d508a",
- "c0889e2039bcf7bcb5d2f33cdca69adc",
+ "4334156168b34ab599d9b5b30f522fe9", "bc4649d5ba47c7ff178d92e475960fb0",
+ "8d316e5933326dcac24e1064794b5d12", "a27270fed024eafd762c95de85f4da51",
+ "c33dff000d4256c2b8f3bf9e9bab14d2", "44d8cddc2ad8f79b8ed3306051722b4f",
+ "eb54839b2bad6699d8946f01ec041cd0", "ecb0d56ae5f677ea45127ce9d5c058e4",
+ "0b7936841f6813da818275944895b574", "9117972ef64f91a58ff73e1731c81db2",
+ "c56d5e8c729e46825f46dd5d3b5d508a", "c0889e2039bcf7bcb5d2f33cdca69adc",
"309a618577b27c648f9c5ee45252bc8f",
};
TestIntraPred("Intra4", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -103,18 +97,12 @@
void TestIntraPred8(VpxPredFunc const *pred_funcs) {
static const int kNumVp9IntraFuncs = 13;
static const char *const kSignatures[kNumVp9IntraFuncs] = {
- "7694ddeeefed887faf9d339d18850928",
- "7d726b1213591b99f736be6dec65065b",
- "19c5711281357a485591aaf9c96c0a67",
- "ba6b66877a089e71cd938e3b8c40caac",
- "802440c93317e0f8ba93fab02ef74265",
- "9e09a47a15deb0b9d8372824f9805080",
- "b7c2d8c662268c0c427da412d7b0311d",
- "78339c1c60bb1d67d248ab8c4da08b7f",
- "5c97d70f7d47de1882a6cd86c165c8a9",
- "8182bf60688b42205acd95e59e967157",
- "08323400005a297f16d7e57e7fe1eaac",
- "95f7bfc262329a5849eda66d8f7c68ce",
+ "7694ddeeefed887faf9d339d18850928", "7d726b1213591b99f736be6dec65065b",
+ "19c5711281357a485591aaf9c96c0a67", "ba6b66877a089e71cd938e3b8c40caac",
+ "802440c93317e0f8ba93fab02ef74265", "9e09a47a15deb0b9d8372824f9805080",
+ "b7c2d8c662268c0c427da412d7b0311d", "78339c1c60bb1d67d248ab8c4da08b7f",
+ "5c97d70f7d47de1882a6cd86c165c8a9", "8182bf60688b42205acd95e59e967157",
+ "08323400005a297f16d7e57e7fe1eaac", "95f7bfc262329a5849eda66d8f7c68ce",
"815b75c8e0d91cc1ae766dc5d3e445a3",
};
TestIntraPred("Intra8", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -124,18 +112,12 @@
void TestIntraPred16(VpxPredFunc const *pred_funcs) {
static const int kNumVp9IntraFuncs = 13;
static const char *const kSignatures[kNumVp9IntraFuncs] = {
- "b40dbb555d5d16a043dc361e6694fe53",
- "fb08118cee3b6405d64c1fd68be878c6",
- "6c190f341475c837cc38c2e566b64875",
- "db5c34ccbe2c7f595d9b08b0dc2c698c",
- "a62cbfd153a1f0b9fed13e62b8408a7a",
- "143df5b4c89335e281103f610f5052e4",
- "d87feb124107cdf2cfb147655aa0bb3c",
- "7841fae7d4d47b519322e6a03eeed9dc",
- "f6ebed3f71cbcf8d6d0516ce87e11093",
- "3cc480297dbfeed01a1c2d78dd03d0c5",
- "b9f69fa6532b372c545397dcb78ef311",
- "a8fe1c70432f09d0c20c67bdb6432c4d",
+ "b40dbb555d5d16a043dc361e6694fe53", "fb08118cee3b6405d64c1fd68be878c6",
+ "6c190f341475c837cc38c2e566b64875", "db5c34ccbe2c7f595d9b08b0dc2c698c",
+ "a62cbfd153a1f0b9fed13e62b8408a7a", "143df5b4c89335e281103f610f5052e4",
+ "d87feb124107cdf2cfb147655aa0bb3c", "7841fae7d4d47b519322e6a03eeed9dc",
+ "f6ebed3f71cbcf8d6d0516ce87e11093", "3cc480297dbfeed01a1c2d78dd03d0c5",
+ "b9f69fa6532b372c545397dcb78ef311", "a8fe1c70432f09d0c20c67bdb6432c4d",
"b8a41aa968ec108af447af4217cba91b",
};
TestIntraPred("Intra16", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -145,18 +127,12 @@
void TestIntraPred32(VpxPredFunc const *pred_funcs) {
static const int kNumVp9IntraFuncs = 13;
static const char *const kSignatures[kNumVp9IntraFuncs] = {
- "558541656d84f9ae7896db655826febe",
- "b3587a1f9a01495fa38c8cd3c8e2a1bf",
- "4c6501e64f25aacc55a2a16c7e8f0255",
- "b3b01379ba08916ef6b1b35f7d9ad51c",
- "0f1eb38b6cbddb3d496199ef9f329071",
- "911c06efb9ed1c3b4c104b232b55812f",
- "9225beb0ddfa7a1d24eaa1be430a6654",
- "0a6d584a44f8db9aa7ade2e2fdb9fc9e",
- "b01c9076525216925f3456f034fb6eee",
- "d267e20ad9e5cd2915d1a47254d3d149",
- "ed012a4a5da71f36c2393023184a0e59",
- "f162b51ed618d28b936974cff4391da5",
+ "558541656d84f9ae7896db655826febe", "b3587a1f9a01495fa38c8cd3c8e2a1bf",
+ "4c6501e64f25aacc55a2a16c7e8f0255", "b3b01379ba08916ef6b1b35f7d9ad51c",
+ "0f1eb38b6cbddb3d496199ef9f329071", "911c06efb9ed1c3b4c104b232b55812f",
+ "9225beb0ddfa7a1d24eaa1be430a6654", "0a6d584a44f8db9aa7ade2e2fdb9fc9e",
+ "b01c9076525216925f3456f034fb6eee", "d267e20ad9e5cd2915d1a47254d3d149",
+ "ed012a4a5da71f36c2393023184a0e59", "f162b51ed618d28b936974cff4391da5",
"9e1370c6d42e08d357d9612c93a71cfc",
};
TestIntraPred("Intra32", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs,
@@ -167,13 +143,13 @@
// Defines a test case for |arch| (e.g., C, SSE2, ...) passing the predictors
// to |test_func|. The test name is 'arch.test_func', e.g., C.TestIntraPred4.
-#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h, \
- d45, d135, d117, d153, d207, d63, tm) \
- TEST(arch, test_func) { \
- static const VpxPredFunc vpx_intra_pred[] = { \
- dc, dc_left, dc_top, dc_128, v, h, d45, \
- d135, d117, d153, d207, d63, tm}; \
- test_func(vpx_intra_pred); \
+#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h, \
+ d45, d135, d117, d153, d207, d63, tm) \
+ TEST(arch, test_func) { \
+ static const VpxPredFunc vpx_intra_pred[] = { \
+ dc, dc_left, dc_top, dc_128, v, h, d45, d135, d117, d153, d207, d63, tm \
+ }; \
+ test_func(vpx_intra_pred); \
}
// -----------------------------------------------------------------------------
@@ -197,9 +173,8 @@
#endif // HAVE_SSE2
#if HAVE_SSSE3
-INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- vpx_d153_predictor_4x4_ssse3, NULL,
+INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, vpx_d153_predictor_4x4_ssse3, NULL,
vpx_d63_predictor_4x4_ssse3, NULL)
#endif // HAVE_SSSE3
@@ -222,8 +197,8 @@
INTRA_PRED_TEST(MSA, TestIntraPred4, vpx_dc_predictor_4x4_msa,
vpx_dc_left_predictor_4x4_msa, vpx_dc_top_predictor_4x4_msa,
vpx_dc_128_predictor_4x4_msa, vpx_v_predictor_4x4_msa,
- vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_tm_predictor_4x4_msa)
+ vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_4x4_msa)
#endif // HAVE_MSA
// -----------------------------------------------------------------------------
@@ -246,10 +221,9 @@
#endif // HAVE_SSE2
#if HAVE_SSSE3
-INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- vpx_d153_predictor_8x8_ssse3, vpx_d207_predictor_8x8_ssse3,
- vpx_d63_predictor_8x8_ssse3, NULL)
+INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, vpx_d153_predictor_8x8_ssse3,
+ vpx_d207_predictor_8x8_ssse3, vpx_d63_predictor_8x8_ssse3, NULL)
#endif // HAVE_SSSE3
#if HAVE_DSPR2
@@ -271,8 +245,8 @@
INTRA_PRED_TEST(MSA, TestIntraPred8, vpx_dc_predictor_8x8_msa,
vpx_dc_left_predictor_8x8_msa, vpx_dc_top_predictor_8x8_msa,
vpx_dc_128_predictor_8x8_msa, vpx_v_predictor_8x8_msa,
- vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_tm_predictor_8x8_msa)
+ vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_8x8_msa)
#endif // HAVE_MSA
// -----------------------------------------------------------------------------
@@ -296,11 +270,10 @@
#endif // HAVE_SSE2
#if HAVE_SSSE3
-INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_d45_predictor_16x16_ssse3,
- NULL, NULL, vpx_d153_predictor_16x16_ssse3,
- vpx_d207_predictor_16x16_ssse3, vpx_d63_predictor_16x16_ssse3,
- NULL)
+INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_d45_predictor_16x16_ssse3, NULL, NULL,
+ vpx_d153_predictor_16x16_ssse3, vpx_d207_predictor_16x16_ssse3,
+ vpx_d63_predictor_16x16_ssse3, NULL)
#endif // HAVE_SSSE3
#if HAVE_DSPR2
@@ -322,8 +295,8 @@
INTRA_PRED_TEST(MSA, TestIntraPred16, vpx_dc_predictor_16x16_msa,
vpx_dc_left_predictor_16x16_msa, vpx_dc_top_predictor_16x16_msa,
vpx_dc_128_predictor_16x16_msa, vpx_v_predictor_16x16_msa,
- vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_tm_predictor_16x16_msa)
+ vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_16x16_msa)
#endif // HAVE_MSA
// -----------------------------------------------------------------------------
@@ -342,13 +315,13 @@
vpx_dc_left_predictor_32x32_sse2,
vpx_dc_top_predictor_32x32_sse2,
vpx_dc_128_predictor_32x32_sse2, vpx_v_predictor_32x32_sse2,
- vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_tm_predictor_32x32_sse2)
+ vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_32x32_sse2)
#endif // HAVE_SSE2
#if HAVE_SSSE3
-INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_d45_predictor_32x32_ssse3, NULL, NULL,
+INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_d45_predictor_32x32_ssse3, NULL, NULL,
vpx_d153_predictor_32x32_ssse3, vpx_d207_predictor_32x32_ssse3,
vpx_d63_predictor_32x32_ssse3, NULL)
#endif // HAVE_SSSE3
@@ -366,8 +339,8 @@
INTRA_PRED_TEST(MSA, TestIntraPred32, vpx_dc_predictor_32x32_msa,
vpx_dc_left_predictor_32x32_msa, vpx_dc_top_predictor_32x32_msa,
vpx_dc_128_predictor_32x32_msa, vpx_v_predictor_32x32_msa,
- vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL,
- NULL, vpx_tm_predictor_32x32_msa)
+ vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_32x32_msa)
#endif // HAVE_MSA
#include "test/test_libvpx.cc"
diff --git a/test/test_libvpx.cc b/test/test_libvpx.cc
index b59d876..9867f9d 100644
--- a/test/test_libvpx.cc
+++ b/test/test_libvpx.cc
@@ -38,22 +38,16 @@
#if ARCH_X86 || ARCH_X86_64
const int simd_caps = x86_simd_caps();
- if (!(simd_caps & HAS_MMX))
- append_negative_gtest_filter(":MMX.*:MMX/*");
- if (!(simd_caps & HAS_SSE))
- append_negative_gtest_filter(":SSE.*:SSE/*");
- if (!(simd_caps & HAS_SSE2))
- append_negative_gtest_filter(":SSE2.*:SSE2/*");
- if (!(simd_caps & HAS_SSE3))
- append_negative_gtest_filter(":SSE3.*:SSE3/*");
+ if (!(simd_caps & HAS_MMX)) append_negative_gtest_filter(":MMX.*:MMX/*");
+ if (!(simd_caps & HAS_SSE)) append_negative_gtest_filter(":SSE.*:SSE/*");
+ if (!(simd_caps & HAS_SSE2)) append_negative_gtest_filter(":SSE2.*:SSE2/*");
+ if (!(simd_caps & HAS_SSE3)) append_negative_gtest_filter(":SSE3.*:SSE3/*");
if (!(simd_caps & HAS_SSSE3))
append_negative_gtest_filter(":SSSE3.*:SSSE3/*");
if (!(simd_caps & HAS_SSE4_1))
append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*");
- if (!(simd_caps & HAS_AVX))
- append_negative_gtest_filter(":AVX.*:AVX/*");
- if (!(simd_caps & HAS_AVX2))
- append_negative_gtest_filter(":AVX2.*:AVX2/*");
+ if (!(simd_caps & HAS_AVX)) append_negative_gtest_filter(":AVX.*:AVX/*");
+ if (!(simd_caps & HAS_AVX2)) append_negative_gtest_filter(":AVX2.*:AVX2/*");
#endif // ARCH_X86 || ARCH_X86_64
#if !CONFIG_SHARED
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index 1fa14a2..6fb8adb 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -20,16 +20,13 @@
#include "vpx_mem/vpx_mem.h"
namespace {
-class TileIndependenceTest : public ::libvpx_test::EncoderTest,
- public ::libvpx_test::CodecTestWith2Params<int,
- int> {
+class TileIndependenceTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<int, int> {
protected:
TileIndependenceTest()
- : EncoderTest(GET_PARAM(0)),
- md5_fw_order_(),
- md5_inv_order_(),
- n_tile_cols_(GET_PARAM(1)),
- n_tile_rows_(GET_PARAM(2)) {
+ : EncoderTest(GET_PARAM(0)), md5_fw_order_(), md5_inv_order_(),
+ n_tile_cols_(GET_PARAM(1)), n_tile_rows_(GET_PARAM(2)) {
init_flags_ = VPX_CODEC_USE_PSNR;
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
cfg.w = 704;
@@ -76,7 +73,7 @@
void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt,
::libvpx_test::MD5 *md5) {
const vpx_codec_err_t res = dec->DecodeFrame(
- reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+ reinterpret_cast<uint8_t *>(pkt->data.frame.buf), pkt->data.frame.sz);
if (res != VPX_CODEC_OK) {
abort_ = true;
ASSERT_EQ(VPX_CODEC_OK, res);
@@ -117,9 +114,7 @@
// run an encode with 2 or 4 tiles, and do the decode both in normal and
// inverted tile ordering. Ensure that the MD5 of the output in both cases
// is identical. If so, tiles are considered independent and the test passes.
-TEST_P(TileIndependenceTest, MD5Match) {
- DoTest();
-}
+TEST_P(TileIndependenceTest, MD5Match) { DoTest(); }
class TileIndependenceTestLarge : public TileIndependenceTest {
virtual void SetCpuUsed(libvpx_test::Encoder *encoder) {
@@ -128,22 +123,18 @@
}
};
-TEST_P(TileIndependenceTestLarge, MD5Match) {
- DoTest();
-}
-
+TEST_P(TileIndependenceTestLarge, MD5Match) { DoTest(); }
#if CONFIG_EXT_TILE
VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(1, 2, 32),
- ::testing::Values(1, 2, 32));
+ ::testing::Values(1, 2, 32));
VP10_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge,
::testing::Values(1, 2, 32),
::testing::Values(1, 2, 32));
#else
VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Values(0, 1),
- ::testing::Values(0, 1));
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge,
- ::testing::Values(0, 1),
+ ::testing::Values(0, 1));
+VP10_INSTANTIATE_TEST_CASE(TileIndependenceTestLarge, ::testing::Values(0, 1),
::testing::Values(0, 1));
#endif // CONFIG_EXT_TILE
} // namespace
diff --git a/test/transform_test_base.h b/test/transform_test_base.h
index cf2facd..071018c 100644
--- a/test/transform_test_base.h
+++ b/test/transform_test_base.h
@@ -14,7 +14,6 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx/vpx_codec.h"
-
namespace libvpx_test {
// Note:
@@ -44,19 +43,19 @@
int64_t total_error = 0;
const int count_test_block = 10000;
- int16_t *test_input_block = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
- tran_low_t *test_temp_block = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
- uint8_t *dst = reinterpret_cast<uint8_t *>
- (vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
- uint8_t *src = reinterpret_cast<uint8_t *>
- (vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+ int16_t *test_input_block = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+ tran_low_t *test_temp_block = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ uint8_t *dst = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+ uint8_t *src = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
#if CONFIG_VP9_HIGHBITDEPTH
- uint16_t *dst16 = reinterpret_cast<uint16_t *>
- (vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
- uint16_t *src16 = reinterpret_cast<uint16_t *>
- (vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ uint16_t *dst16 = reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ uint16_t *src16 = reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
#endif
for (int i = 0; i < count_test_block; ++i) {
@@ -75,14 +74,14 @@
}
}
- ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
- test_temp_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunFwdTxfm(test_input_block, test_temp_block, pitch_));
if (bit_depth_ == VPX_BITS_8) {
ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block,
- CONVERT_TO_BYTEPTR(dst16), pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
@@ -95,15 +94,13 @@
const uint32_t diff = dst[j] - src[j];
#endif
const uint32_t error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
total_error += error;
}
}
EXPECT_GE(static_cast<uint32_t>(limit), max_error)
- << "Error: 4x4 FHT/IHT has an individual round trip error > "
- << limit;
+ << "Error: 4x4 FHT/IHT has an individual round trip error > " << limit;
EXPECT_GE(count_test_block * limit, total_error)
<< "Error: 4x4 FHT/IHT has average round trip error > " << limit
@@ -123,12 +120,12 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- int16_t *input_block = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
- tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
- tran_low_t *output_block = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ int16_t *input_block = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+ tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ tran_low_t *output_block = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
@@ -154,12 +151,12 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- int16_t *input_extreme_block = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
- tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
- tran_low_t *output_block = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ int16_t *input_extreme_block = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+ tran_low_t *output_ref_block = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ tran_low_t *output_block = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
@@ -167,16 +164,14 @@
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0) {
- for (int j = 0; j < num_coeffs_; ++j)
- input_extreme_block[j] = mask_;
+ for (int j = 0; j < num_coeffs_; ++j) input_extreme_block[j] = mask_;
} else if (i == 1) {
- for (int j = 0; j < num_coeffs_; ++j)
- input_extreme_block[j] = -mask_;
+ for (int j = 0; j < num_coeffs_; ++j) input_extreme_block[j] = -mask_;
}
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
- ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
- output_block, pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunFwdTxfm(input_extreme_block, output_block, pitch_));
int row_length = FindRowLength();
// The minimum quant value is 4.
@@ -196,20 +191,20 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- int16_t *in = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
- tran_low_t *coeff = reinterpret_cast<tran_low_t *>
- (vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
- uint8_t *dst = reinterpret_cast<uint8_t *>
- (vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
- uint8_t *src = reinterpret_cast<uint8_t *>
- (vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+ int16_t *in = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * num_coeffs_));
+ tran_low_t *coeff = reinterpret_cast<tran_low_t *>(
+ vpx_memalign(16, sizeof(tran_low_t) * num_coeffs_));
+ uint8_t *dst = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
+ uint8_t *src = reinterpret_cast<uint8_t *>(
+ vpx_memalign(16, sizeof(uint8_t) * num_coeffs_));
#if CONFIG_VP9_HIGHBITDEPTH
- uint16_t *dst16 = reinterpret_cast<uint16_t *>
- (vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
- uint16_t *src16 = reinterpret_cast<uint16_t *>
- (vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ uint16_t *dst16 = reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
+ uint16_t *src16 = reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, sizeof(uint16_t) * num_coeffs_));
#endif
for (int i = 0; i < count_test_block; ++i) {
@@ -234,8 +229,8 @@
ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
} else {
- ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
- pitch_));
+ ASM_REGISTER_STATE_CHECK(
+ RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
#endif
}
@@ -248,8 +243,7 @@
#endif
const uint32_t error = diff * diff;
EXPECT_GE(static_cast<uint32_t>(limit), error)
- << "Error: 4x4 IDCT has error " << error
- << " at index " << j;
+ << "Error: 4x4 IDCT has error " << error << " at index " << j;
}
}
vpx_free(in);
diff --git a/test/user_priv_test.cc b/test/user_priv_test.cc
index 8512d88..4b5de09 100644
--- a/test/user_priv_test.cc
+++ b/test/user_priv_test.cc
@@ -34,8 +34,8 @@
void CheckUserPrivateData(void *user_priv, int *target) {
// actual pointer value should be the same as expected.
- EXPECT_EQ(reinterpret_cast<void *>(target), user_priv) <<
- "user_priv pointer value does not match.";
+ EXPECT_EQ(reinterpret_cast<void *>(target), user_priv)
+ << "user_priv pointer value does not match.";
}
// Decodes |filename|. Passes in user_priv data when calling DecodeFrame and
diff --git a/test/util.h b/test/util.h
index b27bffa..0ef2ad8 100644
--- a/test/util.h
+++ b/test/util.h
@@ -17,14 +17,13 @@
#include "vpx/vpx_image.h"
// Macros
-#define GET_PARAM(k) std::tr1::get< k >(GetParam())
+#define GET_PARAM(k) std::tr1::get<k>(GetParam())
inline double compute_psnr(const vpx_image_t *img1, const vpx_image_t *img2) {
- assert((img1->fmt == img2->fmt) &&
- (img1->d_w == img2->d_w) &&
+ assert((img1->fmt == img2->fmt) && (img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h));
- const unsigned int width_y = img1->d_w;
+ const unsigned int width_y = img1->d_w;
const unsigned int height_y = img1->d_h;
unsigned int i, j;
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 0c1d409..e8b5669 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -832,63 +832,62 @@
const VarianceParams kArrayHBDVariance_c[] = {
#if CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(7, 7, &vpx_highbd_12_variance128x128_c, 12),
- VarianceParams(7, 6, &vpx_highbd_12_variance128x64_c, 12),
- VarianceParams(6, 7, &vpx_highbd_12_variance64x128_c, 12),
+ VarianceParams(7, 7, &vpx_highbd_12_variance128x128_c, 12),
+ VarianceParams(7, 6, &vpx_highbd_12_variance128x64_c, 12),
+ VarianceParams(6, 7, &vpx_highbd_12_variance64x128_c, 12),
#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(6, 6, &vpx_highbd_12_variance64x64_c, 12),
- VarianceParams(6, 5, &vpx_highbd_12_variance64x32_c, 12),
- VarianceParams(5, 6, &vpx_highbd_12_variance32x64_c, 12),
- VarianceParams(5, 5, &vpx_highbd_12_variance32x32_c, 12),
- VarianceParams(5, 4, &vpx_highbd_12_variance32x16_c, 12),
- VarianceParams(4, 5, &vpx_highbd_12_variance16x32_c, 12),
- VarianceParams(4, 4, &vpx_highbd_12_variance16x16_c, 12),
- VarianceParams(4, 3, &vpx_highbd_12_variance16x8_c, 12),
- VarianceParams(3, 4, &vpx_highbd_12_variance8x16_c, 12),
- VarianceParams(3, 3, &vpx_highbd_12_variance8x8_c, 12),
- VarianceParams(3, 2, &vpx_highbd_12_variance8x4_c, 12),
- VarianceParams(2, 3, &vpx_highbd_12_variance4x8_c, 12),
- VarianceParams(2, 2, &vpx_highbd_12_variance4x4_c, 12),
+ VarianceParams(6, 6, &vpx_highbd_12_variance64x64_c, 12),
+ VarianceParams(6, 5, &vpx_highbd_12_variance64x32_c, 12),
+ VarianceParams(5, 6, &vpx_highbd_12_variance32x64_c, 12),
+ VarianceParams(5, 5, &vpx_highbd_12_variance32x32_c, 12),
+ VarianceParams(5, 4, &vpx_highbd_12_variance32x16_c, 12),
+ VarianceParams(4, 5, &vpx_highbd_12_variance16x32_c, 12),
+ VarianceParams(4, 4, &vpx_highbd_12_variance16x16_c, 12),
+ VarianceParams(4, 3, &vpx_highbd_12_variance16x8_c, 12),
+ VarianceParams(3, 4, &vpx_highbd_12_variance8x16_c, 12),
+ VarianceParams(3, 3, &vpx_highbd_12_variance8x8_c, 12),
+ VarianceParams(3, 2, &vpx_highbd_12_variance8x4_c, 12),
+ VarianceParams(2, 3, &vpx_highbd_12_variance4x8_c, 12),
+ VarianceParams(2, 2, &vpx_highbd_12_variance4x4_c, 12),
#if CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(7, 7, &vpx_highbd_10_variance128x128_c, 10),
- VarianceParams(7, 6, &vpx_highbd_10_variance128x64_c, 10),
- VarianceParams(6, 7, &vpx_highbd_10_variance64x128_c, 10),
+ VarianceParams(7, 7, &vpx_highbd_10_variance128x128_c, 10),
+ VarianceParams(7, 6, &vpx_highbd_10_variance128x64_c, 10),
+ VarianceParams(6, 7, &vpx_highbd_10_variance64x128_c, 10),
#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(6, 6, &vpx_highbd_10_variance64x64_c, 10),
- VarianceParams(6, 5, &vpx_highbd_10_variance64x32_c, 10),
- VarianceParams(5, 6, &vpx_highbd_10_variance32x64_c, 10),
- VarianceParams(5, 5, &vpx_highbd_10_variance32x32_c, 10),
- VarianceParams(5, 4, &vpx_highbd_10_variance32x16_c, 10),
- VarianceParams(4, 5, &vpx_highbd_10_variance16x32_c, 10),
- VarianceParams(4, 4, &vpx_highbd_10_variance16x16_c, 10),
- VarianceParams(4, 3, &vpx_highbd_10_variance16x8_c, 10),
- VarianceParams(3, 4, &vpx_highbd_10_variance8x16_c, 10),
- VarianceParams(3, 3, &vpx_highbd_10_variance8x8_c, 10),
- VarianceParams(3, 2, &vpx_highbd_10_variance8x4_c, 10),
- VarianceParams(2, 3, &vpx_highbd_10_variance4x8_c, 10),
- VarianceParams(2, 2, &vpx_highbd_10_variance4x4_c, 10),
+ VarianceParams(6, 6, &vpx_highbd_10_variance64x64_c, 10),
+ VarianceParams(6, 5, &vpx_highbd_10_variance64x32_c, 10),
+ VarianceParams(5, 6, &vpx_highbd_10_variance32x64_c, 10),
+ VarianceParams(5, 5, &vpx_highbd_10_variance32x32_c, 10),
+ VarianceParams(5, 4, &vpx_highbd_10_variance32x16_c, 10),
+ VarianceParams(4, 5, &vpx_highbd_10_variance16x32_c, 10),
+ VarianceParams(4, 4, &vpx_highbd_10_variance16x16_c, 10),
+ VarianceParams(4, 3, &vpx_highbd_10_variance16x8_c, 10),
+ VarianceParams(3, 4, &vpx_highbd_10_variance8x16_c, 10),
+ VarianceParams(3, 3, &vpx_highbd_10_variance8x8_c, 10),
+ VarianceParams(3, 2, &vpx_highbd_10_variance8x4_c, 10),
+ VarianceParams(2, 3, &vpx_highbd_10_variance4x8_c, 10),
+ VarianceParams(2, 2, &vpx_highbd_10_variance4x4_c, 10),
#if CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(7, 7, &vpx_highbd_8_variance128x128_c, 8),
- VarianceParams(7, 6, &vpx_highbd_8_variance128x64_c, 8),
- VarianceParams(6, 7, &vpx_highbd_8_variance64x128_c, 8),
+ VarianceParams(7, 7, &vpx_highbd_8_variance128x128_c, 8),
+ VarianceParams(7, 6, &vpx_highbd_8_variance128x64_c, 8),
+ VarianceParams(6, 7, &vpx_highbd_8_variance64x128_c, 8),
#endif // CONFIG_VP10 && CONFIG_EXT_PARTITION
- VarianceParams(6, 6, &vpx_highbd_8_variance64x64_c, 8),
- VarianceParams(6, 5, &vpx_highbd_8_variance64x32_c, 8),
- VarianceParams(5, 6, &vpx_highbd_8_variance32x64_c, 8),
- VarianceParams(5, 5, &vpx_highbd_8_variance32x32_c, 8),
- VarianceParams(5, 4, &vpx_highbd_8_variance32x16_c, 8),
- VarianceParams(4, 5, &vpx_highbd_8_variance16x32_c, 8),
- VarianceParams(4, 4, &vpx_highbd_8_variance16x16_c, 8),
- VarianceParams(4, 3, &vpx_highbd_8_variance16x8_c, 8),
- VarianceParams(3, 4, &vpx_highbd_8_variance8x16_c, 8),
- VarianceParams(3, 3, &vpx_highbd_8_variance8x8_c, 8),
- VarianceParams(3, 2, &vpx_highbd_8_variance8x4_c, 8),
- VarianceParams(2, 3, &vpx_highbd_8_variance4x8_c, 8),
- VarianceParams(2, 2, &vpx_highbd_8_variance4x4_c, 8)
+ VarianceParams(6, 6, &vpx_highbd_8_variance64x64_c, 8),
+ VarianceParams(6, 5, &vpx_highbd_8_variance64x32_c, 8),
+ VarianceParams(5, 6, &vpx_highbd_8_variance32x64_c, 8),
+ VarianceParams(5, 5, &vpx_highbd_8_variance32x32_c, 8),
+ VarianceParams(5, 4, &vpx_highbd_8_variance32x16_c, 8),
+ VarianceParams(4, 5, &vpx_highbd_8_variance16x32_c, 8),
+ VarianceParams(4, 4, &vpx_highbd_8_variance16x16_c, 8),
+ VarianceParams(4, 3, &vpx_highbd_8_variance16x8_c, 8),
+ VarianceParams(3, 4, &vpx_highbd_8_variance8x16_c, 8),
+ VarianceParams(3, 3, &vpx_highbd_8_variance8x8_c, 8),
+ VarianceParams(3, 2, &vpx_highbd_8_variance8x4_c, 8),
+ VarianceParams(2, 3, &vpx_highbd_8_variance4x8_c, 8),
+ VarianceParams(2, 2, &vpx_highbd_8_variance4x4_c, 8)
};
-INSTANTIATE_TEST_CASE_P(
- C, VpxHBDVarianceTest,
- ::testing::ValuesIn(kArrayHBDVariance_c));
+INSTANTIATE_TEST_CASE_P(C, VpxHBDVarianceTest,
+ ::testing::ValuesIn(kArrayHBDVariance_c));
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
diff --git a/test/video_source.h b/test/video_source.h
index ade323e..94a95ce 100644
--- a/test/video_source.h
+++ b/test/video_source.h
@@ -51,7 +51,7 @@
#undef TO_STRING
#undef STRINGIFY
-inline FILE *OpenTestDataFile(const std::string& file_name) {
+inline FILE *OpenTestDataFile(const std::string &file_name) {
const std::string path_to_source = GetDataPath() + "/" + file_name;
return fopen(path_to_source.c_str(), "rb");
}
@@ -76,21 +76,15 @@
class TempOutFile {
public:
- TempOutFile() {
- file_ = GetTempOutFile(&file_name_);
- }
+ TempOutFile() { file_ = GetTempOutFile(&file_name_); }
~TempOutFile() {
CloseFile();
if (!file_name_.empty()) {
EXPECT_EQ(0, remove(file_name_.c_str()));
}
}
- FILE *file() {
- return file_;
- }
- const std::string& file_name() {
- return file_name_;
- }
+ FILE *file() { return file_; }
+ const std::string &file_name() { return file_name_; }
protected:
void CloseFile() {
@@ -134,14 +128,10 @@
virtual unsigned int limit() const = 0;
};
-
class DummyVideoSource : public VideoSource {
public:
DummyVideoSource()
- : img_(NULL),
- limit_(100),
- width_(80),
- height_(64),
+ : img_(NULL), limit_(100), width_(80), height_(64),
format_(VPX_IMG_FMT_I420) {
ReallocImage();
}
@@ -158,9 +148,7 @@
FillFrame();
}
- virtual vpx_image_t *img() const {
- return (frame_ < limit_) ? img_ : NULL;
- }
+ virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
// Models a stream where Timebase = 1/FPS, so pts == frame.
virtual vpx_codec_pts_t pts() const { return frame_; }
@@ -168,7 +156,7 @@
virtual unsigned long duration() const { return 1; }
virtual vpx_rational_t timebase() const {
- const vpx_rational_t t = {1, 30};
+ const vpx_rational_t t = { 1, 30 };
return t;
}
@@ -176,9 +164,7 @@
virtual unsigned int limit() const { return limit_; }
- void set_limit(unsigned int limit) {
- limit_ = limit;
- }
+ void set_limit(unsigned int limit) { limit_ = limit; }
void SetSize(unsigned int width, unsigned int height) {
if (width != width_ || height != height_) {
@@ -196,7 +182,9 @@
}
protected:
- virtual void FillFrame() { if (img_) memset(img_->img_data, 0, raw_sz_); }
+ virtual void FillFrame() {
+ if (img_) memset(img_->img_data, 0, raw_sz_);
+ }
void ReallocImage() {
vpx_img_free(img_);
@@ -205,7 +193,7 @@
}
vpx_image_t *img_;
- size_t raw_sz_;
+ size_t raw_sz_;
unsigned int limit_;
unsigned int frame_;
unsigned int width_;
@@ -213,12 +201,10 @@
vpx_img_fmt_t format_;
};
-
class RandomVideoSource : public DummyVideoSource {
public:
RandomVideoSource(int seed = ACMRandom::DeterministicSeed())
- : rnd_(seed),
- seed_(seed) { }
+ : rnd_(seed), seed_(seed) {}
protected:
// Reset the RNG to get a matching stream for the second pass
@@ -233,8 +219,7 @@
virtual void FillFrame() {
if (img_) {
if (frame_ % 30 < 15)
- for (size_t i = 0; i < raw_sz_; ++i)
- img_->img_data[i] = rnd_.Rand8();
+ for (size_t i = 0; i < raw_sz_; ++i) img_->img_data[i] = rnd_.Rand8();
else
memset(img_->img_data, 0, raw_sz_);
}
diff --git a/test/vp10_ans_test.cc b/test/vp10_ans_test.cc
index 20aedba..ca89f20 100644
--- a/test/vp10_ans_test.cc
+++ b/test/vp10_ans_test.cc
@@ -151,7 +151,10 @@
// TODO(aconverse): replace this with a more representative distribution from
// the codec.
const rans_sym rans_sym_tab[] = {
- {16 * 4, 0 * 4}, {100 * 4, 16 * 4}, {70 * 4, 116 *4}, {70 * 4, 186 *4},
+ { 16 * 4, 0 * 4 },
+ { 100 * 4, 16 * 4 },
+ { 70 * 4, 116 * 4 },
+ { 70 * 4, 186 * 4 },
};
const int kDistinctSyms = sizeof(rans_sym_tab) / sizeof(rans_sym_tab[0]);
@@ -172,8 +175,7 @@
return ret;
}
-void rans_build_dec_tab(const struct rans_sym sym_tab[],
- rans_dec_lut dec_tab) {
+void rans_build_dec_tab(const struct rans_sym sym_tab[], rans_dec_lut dec_tab) {
dec_tab[0] = 0;
for (int i = 1; dec_tab[i - 1] < rans_precision; ++i) {
dec_tab[i] = dec_tab[i - 1] + sym_tab[i - 1].prob;
diff --git a/test/vp10_convolve_optimz_test.cc b/test/vp10_convolve_optimz_test.cc
index ec77035..2235088 100644
--- a/test/vp10_convolve_optimz_test.cc
+++ b/test/vp10_convolve_optimz_test.cc
@@ -21,21 +21,20 @@
using std::tr1::tuple;
using libvpx_test::ACMRandom;
-typedef void (*conv_filter_t)(const uint8_t*, int, uint8_t*, int,
- int, int, const InterpFilterParams,
- const int, int, int);
+typedef void (*conv_filter_t)(const uint8_t *, int, uint8_t *, int, int, int,
+ const InterpFilterParams, const int, int, int);
#if CONFIG_VP9_HIGHBITDEPTH
-typedef void (*hbd_conv_filter_t)(const uint16_t*, int, uint16_t*, int,
- int, int, const InterpFilterParams,
- const int, int, int, int);
+typedef void (*hbd_conv_filter_t)(const uint16_t *, int, uint16_t *, int, int,
+ int, const InterpFilterParams, const int, int,
+ int, int);
#endif
// Test parameter list:
// <convolve_horiz_func, convolve_vert_func,
// <width, height>, filter_params, subpel_x_q4, avg>
typedef tuple<int, int> BlockDimension;
-typedef tuple<conv_filter_t, conv_filter_t, BlockDimension, INTERP_FILTER,
- int, int> ConvParams;
+typedef tuple<conv_filter_t, conv_filter_t, BlockDimension, INTERP_FILTER, int,
+ int> ConvParams;
#if CONFIG_VP9_HIGHBITDEPTH
// Test parameter list:
// <convolve_horiz_func, convolve_vert_func,
@@ -136,10 +135,11 @@
for (r = 0; r < height_; ++r) {
for (c = 0; c < width_; ++c) {
EXPECT_EQ((uint8_t)dst_ref_ptr[c], (uint8_t)dst_ptr[c])
- << "Error at row: " << r << " col: " << c << " "
- << "w = " << width_ << " " << "h = " << height_ << " "
- << "filter group index = " << filter_ << " "
- << "filter index = " << subpel_;
+ << "Error at row: " << r << " col: " << c << " "
+ << "w = " << width_ << " "
+ << "h = " << height_ << " "
+ << "filter group index = " << filter_ << " "
+ << "filter index = " << subpel_;
}
dst_ptr += stride;
dst_ref_ptr += stride;
@@ -154,8 +154,8 @@
vp10_convolve_horiz_c(src_ref_, stride, dst_ref_, stride, width_, height_,
filter_params, subpel_, x_step_q4, avg_);
- conv_horiz_(src_, stride, dst_, stride, width_, height_,
- filter_params, subpel_, x_step_q4, avg_);
+ conv_horiz_(src_, stride, dst_, stride, width_, height_, filter_params,
+ subpel_, x_step_q4, avg_);
DiffFilterBuffer();
@@ -170,9 +170,8 @@
intermediate_height, filter_params, subpel_, x_step_q4,
avg_);
- conv_horiz_(src_, stride, dst_, stride, width_,
- intermediate_height, filter_params, subpel_, x_step_q4,
- avg_);
+ conv_horiz_(src_, stride, dst_, stride, width_, intermediate_height,
+ filter_params, subpel_, x_step_q4, avg_);
DiffFilterBuffer();
}
@@ -185,8 +184,8 @@
vp10_convolve_vert_c(src_ref_, stride, dst_ref_, stride, width_, height_,
filter_params, subpel_, x_step_q4, avg_);
- conv_vert_(src_, stride, dst_, stride, width_, height_,
- filter_params, subpel_, x_step_q4, avg_);
+ conv_vert_(src_, stride, dst_, stride, width_, height_, filter_params,
+ subpel_, x_step_q4, avg_);
DiffFilterBuffer();
}
@@ -202,44 +201,31 @@
#if (HAVE_SSSE3 || HAVE_SSE4_1) && CONFIG_EXT_INTERP
const BlockDimension kBlockDim[] = {
- make_tuple(2, 2),
- make_tuple(2, 4),
- make_tuple(4, 4),
- make_tuple(4, 8),
- make_tuple(8, 4),
- make_tuple(8, 8),
- make_tuple(8, 16),
- make_tuple(16, 8),
- make_tuple(16, 16),
- make_tuple(16, 32),
- make_tuple(32, 16),
- make_tuple(32, 32),
- make_tuple(32, 64),
- make_tuple(64, 32),
- make_tuple(64, 64),
- make_tuple(64, 128),
- make_tuple(128, 64),
- make_tuple(128, 128),
+ make_tuple(2, 2), make_tuple(2, 4), make_tuple(4, 4),
+ make_tuple(4, 8), make_tuple(8, 4), make_tuple(8, 8),
+ make_tuple(8, 16), make_tuple(16, 8), make_tuple(16, 16),
+ make_tuple(16, 32), make_tuple(32, 16), make_tuple(32, 32),
+ make_tuple(32, 64), make_tuple(64, 32), make_tuple(64, 64),
+ make_tuple(64, 128), make_tuple(128, 64), make_tuple(128, 128),
};
// 10/12-tap filters
-const INTERP_FILTER kFilter[] = {6, 4, 2};
+const INTERP_FILTER kFilter[] = { 6, 4, 2 };
-const int kSubpelQ4[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+const int kSubpelQ4[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
-const int kAvg[] = {0, 1};
+const int kAvg[] = { 0, 1 };
#endif
#if HAVE_SSSE3 && CONFIG_EXT_INTERP
INSTANTIATE_TEST_CASE_P(
SSSE3, VP10ConvolveOptimzTest,
- ::testing::Combine(
- ::testing::Values(vp10_convolve_horiz_ssse3),
- ::testing::Values(vp10_convolve_vert_ssse3),
- ::testing::ValuesIn(kBlockDim),
- ::testing::ValuesIn(kFilter),
- ::testing::ValuesIn(kSubpelQ4),
- ::testing::ValuesIn(kAvg)));
+ ::testing::Combine(::testing::Values(vp10_convolve_horiz_ssse3),
+ ::testing::Values(vp10_convolve_vert_ssse3),
+ ::testing::ValuesIn(kBlockDim),
+ ::testing::ValuesIn(kFilter),
+ ::testing::ValuesIn(kSubpelQ4),
+ ::testing::ValuesIn(kAvg)));
#endif // HAVE_SSSE3 && CONFIG_EXT_INTERP
#if CONFIG_VP9_HIGHBITDEPTH
@@ -324,11 +310,12 @@
for (r = 0; r < height_; ++r) {
for (c = 0; c < width_; ++c) {
EXPECT_EQ((uint16_t)dst_ref_ptr[c], (uint16_t)dst_ptr[c])
- << "Error at row: " << r << " col: " << c << " "
- << "w = " << width_ << " " << "h = " << height_ << " "
- << "filter group index = " << filter_ << " "
- << "filter index = " << subpel_ << " "
- << "bit depth = " << bit_depth_;
+ << "Error at row: " << r << " col: " << c << " "
+ << "w = " << width_ << " "
+ << "h = " << height_ << " "
+ << "filter group index = " << filter_ << " "
+ << "filter index = " << subpel_ << " "
+ << "bit depth = " << bit_depth_;
}
dst_ptr += stride;
dst_ref_ptr += stride;
@@ -340,12 +327,12 @@
InterpFilterParams filter_params = vp10_get_interp_filter_params(filter_);
- vp10_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_,
- height_, filter_params, subpel_, x_step_q4,
- avg_, bit_depth_);
+ vp10_highbd_convolve_horiz_c(src_, stride, dst_ref_, stride, width_, height_,
+ filter_params, subpel_, x_step_q4, avg_,
+ bit_depth_);
- conv_horiz_(src_, stride, dst_, stride, width_, height_,
- filter_params, subpel_, x_step_q4, avg_, bit_depth_);
+ conv_horiz_(src_, stride, dst_, stride, width_, height_, filter_params,
+ subpel_, x_step_q4, avg_, bit_depth_);
DiffFilterBuffer();
@@ -375,8 +362,8 @@
filter_params, subpel_, x_step_q4, avg_,
bit_depth_);
- conv_vert_(src_, stride, dst_, stride, width_, height_,
- filter_params, subpel_, x_step_q4, avg_, bit_depth_);
+ conv_vert_(src_, stride, dst_, stride, width_, height_, filter_params,
+ subpel_, x_step_q4, avg_, bit_depth_);
DiffFilterBuffer();
}
@@ -390,18 +377,17 @@
#if HAVE_SSE4_1 && CONFIG_EXT_INTERP
-const int kBitdepth[] = {10, 12};
+const int kBitdepth[] = { 10, 12 };
INSTANTIATE_TEST_CASE_P(
SSE4_1, VP10HbdConvolveOptimzTest,
- ::testing::Combine(
- ::testing::Values(vp10_highbd_convolve_horiz_sse4_1),
- ::testing::Values(vp10_highbd_convolve_vert_sse4_1),
- ::testing::ValuesIn(kBlockDim),
- ::testing::ValuesIn(kFilter),
- ::testing::ValuesIn(kSubpelQ4),
- ::testing::ValuesIn(kAvg),
- ::testing::ValuesIn(kBitdepth)));
+ ::testing::Combine(::testing::Values(vp10_highbd_convolve_horiz_sse4_1),
+ ::testing::Values(vp10_highbd_convolve_vert_sse4_1),
+ ::testing::ValuesIn(kBlockDim),
+ ::testing::ValuesIn(kFilter),
+ ::testing::ValuesIn(kSubpelQ4),
+ ::testing::ValuesIn(kAvg),
+ ::testing::ValuesIn(kBitdepth)));
#endif // HAVE_SSE4_1 && CONFIG_EXT_INTERP
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/vp10_convolve_test.cc b/test/vp10_convolve_test.cc
index 9d9ae7f..020c735 100644
--- a/test/vp10_convolve_test.cc
+++ b/test/vp10_convolve_test.cc
@@ -21,10 +21,8 @@
TEST(VP10ConvolveTest, vp10_convolve8) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
#if CONFIG_DUAL_FILTER
- INTERP_FILTER interp_filter[4] = {
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR
- };
+ INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
+ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter[0]);
#else
@@ -36,8 +34,8 @@
int filter_center = filter_size / 2 - 1;
uint8_t src[12 * 12];
int src_stride = filter_size;
- uint8_t dst[1] = {0};
- uint8_t dst1[1] = {0};
+ uint8_t dst[1] = { 0 };
+ uint8_t dst1[1] = { 0 };
int dst_stride = 1;
int x_step_q4 = 16;
int y_step_q4 = 16;
@@ -58,9 +56,9 @@
dst, dst_stride, w, h, interp_filter, subpel_x_q4, x_step_q4,
subpel_y_q4, y_step_q4, avg);
- const int16_t* x_filter =
+ const int16_t *x_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
- const int16_t* y_filter =
+ const int16_t *y_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
vpx_convolve8_c(src + src_stride * filter_center + filter_center, src_stride,
@@ -70,10 +68,8 @@
TEST(VP10ConvolveTest, vp10_convolve) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
#if CONFIG_DUAL_FILTER
- INTERP_FILTER interp_filter[4] = {
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR
- };
+ INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
+ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter[0]);
#else
@@ -85,7 +81,7 @@
int filter_center = filter_size / 2 - 1;
uint8_t src[12 * 12];
int src_stride = filter_size;
- uint8_t dst[1] = {0};
+ uint8_t dst[1] = { 0 };
int dst_stride = 1;
int x_step_q4 = 16;
int y_step_q4 = 16;
@@ -108,9 +104,9 @@
src_stride, dst, dst_stride, w, h, interp_filter,
subpel_x_q4, x_step_q4, subpel_y_q4, y_step_q4, avg);
- const int16_t* x_filter =
+ const int16_t *x_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
- const int16_t* y_filter =
+ const int16_t *y_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
int temp[12];
@@ -132,10 +128,8 @@
TEST(VP10ConvolveTest, vp10_convolve_avg) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
#if CONFIG_DUAL_FILTER
- INTERP_FILTER interp_filter[4] = {
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR
- };
+ INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
+ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter[0]);
#else
@@ -148,9 +142,9 @@
uint8_t src0[12 * 12];
uint8_t src1[12 * 12];
int src_stride = filter_size;
- uint8_t dst0[1] = {0};
- uint8_t dst1[1] = {0};
- uint8_t dst[1] = {0};
+ uint8_t dst0[1] = { 0 };
+ uint8_t dst1[1] = { 0 };
+ uint8_t dst[1] = { 0 };
int dst_stride = 1;
int x_step_q4 = 16;
int y_step_q4 = 16;
@@ -200,10 +194,8 @@
TEST(VP10ConvolveTest, vp10_highbd_convolve) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
#if CONFIG_DUAL_FILTER
- INTERP_FILTER interp_filter[4] = {
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR
- };
+ INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
+ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter[0]);
#else
@@ -215,7 +207,7 @@
int filter_center = filter_size / 2 - 1;
uint16_t src[12 * 12];
int src_stride = filter_size;
- uint16_t dst[1] = {0};
+ uint16_t dst[1] = { 0 };
int dst_stride = 1;
int x_step_q4 = 16;
int y_step_q4 = 16;
@@ -238,9 +230,9 @@
src_stride, CONVERT_TO_BYTEPTR(dst), dst_stride, w, h, interp_filter,
subpel_x_q4, x_step_q4, subpel_y_q4, y_step_q4, avg, bd);
- const int16_t* x_filter =
+ const int16_t *x_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_x_q4);
- const int16_t* y_filter =
+ const int16_t *y_filter =
vp10_get_interp_filter_subpel_kernel(filter_params, subpel_y_q4);
int temp[12];
@@ -263,10 +255,8 @@
TEST(VP10ConvolveTest, vp10_highbd_convolve_avg) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
#if CONFIG_DUAL_FILTER
- INTERP_FILTER interp_filter[4] = {
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
- EIGHTTAP_REGULAR, EIGHTTAP_REGULAR
- };
+ INTERP_FILTER interp_filter[4] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR,
+ EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
InterpFilterParams filter_params =
vp10_get_interp_filter_params(interp_filter[0]);
#else
@@ -279,9 +269,9 @@
uint16_t src0[12 * 12];
uint16_t src1[12 * 12];
int src_stride = filter_size;
- uint16_t dst0[1] = {0};
- uint16_t dst1[1] = {0};
- uint16_t dst[1] = {0};
+ uint16_t dst0[1] = { 0 };
+ uint16_t dst1[1] = { 0 };
+ uint16_t dst[1] = { 0 };
int dst_stride = 1;
int x_step_q4 = 16;
int y_step_q4 = 16;
@@ -342,9 +332,9 @@
ptrdiff_t filter_size = filter_params.tap; \
int filter_center = filter_size / 2 - 1; \
DECLARE_ALIGNED(16, uint16_t, \
- src[(frame_size + 7) * (frame_size + 7)]) = {0}; \
+ src[(frame_size + 7) * (frame_size + 7)]) = { 0 }; \
int src_stride = frame_size + 7; \
- DECLARE_ALIGNED(16, uint16_t, dst[frame_size * frame_size]) = {0}; \
+ DECLARE_ALIGNED(16, uint16_t, dst[frame_size * frame_size]) = { 0 }; \
int dst_stride = frame_size; \
int x_step_q4 = 16; \
int y_step_q4 = 16; \
@@ -355,9 +345,9 @@
int w = block_size; \
int h = block_size; \
\
- const int16_t* filter_x = \
+ const int16_t *filter_x = \
vp10_get_interp_filter_kernel(filter_params, subpel_x_q4); \
- const int16_t* filter_y = \
+ const int16_t *filter_y = \
vp10_get_interp_filter_kernel(filter_params, subpel_y_q4); \
\
for (int i = 0; i < src_stride * src_stride; i++) { \
@@ -406,9 +396,9 @@
int w = block_size; \
int h = block_size; \
\
- const int16_t* filter_x = \
+ const int16_t *filter_x = \
vp10_get_interp_filter_kernel(filter_params, subpel_x_q4); \
- const int16_t* filter_y = \
+ const int16_t *filter_y = \
vp10_get_interp_filter_kernel(filter_params, subpel_y_q4); \
\
for (int i = 0; i < src_stride * src_stride; i++) { \
diff --git a/test/vp10_dct_test.cc b/test/vp10_dct_test.cc
index 8cf034f..63e0202 100644
--- a/test/vp10_dct_test.cc
+++ b/test/vp10_dct_test.cc
@@ -32,8 +32,7 @@
for (int n = 0; n < size; ++n) {
out[k] += in[n] * cos(PI * (2 * n + 1) * k / (2 * size));
}
- if (k == 0)
- out[k] = out[k] * kInvSqrt2;
+ if (k == 0) out[k] = out[k] * kInvSqrt2;
}
}
@@ -48,14 +47,14 @@
protected:
void RunFwdAccuracyCheck() {
- tran_low_t *input = new tran_low_t[txfm_size_];
+ tran_low_t *input = new tran_low_t[txfm_size_];
tran_low_t *output = new tran_low_t[txfm_size_];
- double *ref_input = new double[txfm_size_];
+ double *ref_input = new double[txfm_size_];
double *ref_output = new double[txfm_size_];
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- for (int ti = 0; ti < count_test_block; ++ti) {
+ for (int ti = 0; ti < count_test_block; ++ti) {
for (int ni = 0; ni < txfm_size_; ++ni) {
input[ni] = rnd.Rand8() - rnd.Rand8();
ref_input[ni] = static_cast<double>(input[ni]);
@@ -84,9 +83,8 @@
};
typedef std::tr1::tuple<FdctFunc, FdctFuncRef, int, int> FdctParam;
-class Vp10FwdTxfm
- : public TransTestBase,
- public ::testing::TestWithParam<FdctParam> {
+class Vp10FwdTxfm : public TransTestBase,
+ public ::testing::TestWithParam<FdctParam> {
public:
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
@@ -97,14 +95,11 @@
virtual void TearDown() {}
};
-TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) {
- RunFwdAccuracyCheck();
-}
+TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
INSTANTIATE_TEST_CASE_P(
C, Vp10FwdTxfm,
- ::testing::Values(
- FdctParam(&fdct4, &reference_dct_1d, 4, 1),
- FdctParam(&fdct8, &reference_dct_1d, 8, 1),
- FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
+ ::testing::Values(FdctParam(&fdct4, &reference_dct_1d, 4, 1),
+ FdctParam(&fdct8, &reference_dct_1d, 8, 1),
+ FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
} // namespace
diff --git a/test/vp10_ext_tile_test.cc b/test/vp10_ext_tile_test.cc
index ad04eeb..e3b24e7 100644
--- a/test/vp10_ext_tile_test.cc
+++ b/test/vp10_ext_tile_test.cc
@@ -34,8 +34,7 @@
public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
protected:
VP10ExtTileTest()
- : EncoderTest(GET_PARAM(0)),
- encoding_mode_(GET_PARAM(1)),
+ : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)),
set_cpu_used_(GET_PARAM(2)) {
init_flags_ = VPX_CODEC_USE_PSNR;
vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
@@ -70,7 +69,7 @@
cfg_.rc_min_quantizer = 0;
}
- virtual void PreEncodeFrameHook(::libvpx_test::VideoSource * video,
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 0) {
// Encode setting
@@ -88,8 +87,8 @@
}
if (video->frame() == 1) {
- frame_flags_ = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
+ frame_flags_ =
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
}
}
@@ -97,8 +96,7 @@
vpx_codec_pts_t pts) {
// Skip 1 already decoded frame to be consistent with the decoder in this
// test.
- if (pts == (vpx_codec_pts_t)kSkip)
- return;
+ if (pts == (vpx_codec_pts_t)kSkip) return;
// Calculate MD5 as the reference.
::libvpx_test::MD5 md5_res;
@@ -108,8 +106,7 @@
virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
// Skip decoding 1 frame.
- if (pkt->data.frame.pts == (vpx_codec_pts_t)kSkip)
- return;
+ if (pkt->data.frame.pts == (vpx_codec_pts_t)kSkip) return;
bool IsLastFrame = (pkt->data.frame.pts == (vpx_codec_pts_t)(kLimit - 1));
@@ -126,7 +123,7 @@
}
const vpx_codec_err_t res = decoder_->DecodeFrame(
- reinterpret_cast<uint8_t*>(pkt->data.frame.buf),
+ reinterpret_cast<uint8_t *>(pkt->data.frame.buf),
pkt->data.frame.sz);
if (res != VPX_CODEC_OK) {
abort_ = true;
@@ -151,15 +148,14 @@
for (int tr = 0; tr < tile_height; ++tr) {
memcpy(tile_img_.planes[plane] +
- tile_img_.stride[plane] * (r * tile_height + tr) +
- c * tile_width,
+ tile_img_.stride[plane] * (r * tile_height + tr) +
+ c * tile_width,
img->planes[plane] + img->stride[plane] * tr, tile_width);
}
}
}
- if (!IsLastFrame)
- break;
+ if (!IsLastFrame) break;
}
if (IsLastFrame) {
@@ -178,8 +174,8 @@
};
TEST_P(VP10ExtTileTest, DecoderResultTest) {
- ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv",
- kImgWidth, kImgHeight, 30, 1, 0, kLimit);
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", kImgWidth,
+ kImgHeight, 30, 1, 0, kLimit);
cfg_.rc_target_bitrate = 500;
cfg_.g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT;
cfg_.g_lag_in_frames = 0;
@@ -195,7 +191,6 @@
VP10_INSTANTIATE_TEST_CASE(
// Now only test 2-pass mode.
- VP10ExtTileTest,
- ::testing::Values(::libvpx_test::kTwoPassGood),
+ VP10ExtTileTest, ::testing::Values(::libvpx_test::kTwoPassGood),
::testing::Range(0, 4));
} // namespace
diff --git a/test/vp10_fht16x16_test.cc b/test/vp10_fht16x16_test.cc
index deccc81..cc15353 100644
--- a/test/vp10_fht16x16_test.cc
+++ b/test/vp10_fht16x16_test.cc
@@ -29,8 +29,7 @@
using libvpx_test::FhtFunc;
typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht16x16Param;
-void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
- int tx_type) {
+void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp10_fht16x16_c(in, out, stride, tx_type);
}
@@ -49,17 +48,16 @@
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-class VP10Trans16x16HT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Ht16x16Param> {
+class VP10Trans16x16HT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Ht16x16Param> {
public:
virtual ~VP10Trans16x16HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 16;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 16;
fwd_txfm_ref = fht16x16_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -80,9 +78,7 @@
IhtFunc inv_txfm_;
};
-TEST_P(VP10Trans16x16HT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(VP10Trans16x16HT, CoeffCheck) { RunCoeffCheck(); }
#if CONFIG_VP9_HIGHBITDEPTH
class VP10HighbdTrans16x16HT
@@ -93,7 +89,7 @@
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
fwd_txfm_ref_ = highbd_fht16x16_ref;
- tx_type_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(1);
bit_depth_ = GET_PARAM(2);
mask_ = (1 << bit_depth_) - 1;
num_coeffs_ = 256;
@@ -140,90 +136,85 @@
}
fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
- ASM_REGISTER_STATE_CHECK(fwd_txfm_(input_, output_, stride, tx_type_,
- bit_depth_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
for (j = 0; j < num_coeffs_; ++j) {
EXPECT_EQ(output_ref_[j], output_[j])
- << "Not bit-exact result at index: " << j
- << " at test block: " << i;
+ << "Not bit-exact result at index: " << j << " at test block: " << i;
}
}
}
-TEST_P(VP10HighbdTrans16x16HT, HighbdCoeffCheck) {
- RunBitexactCheck();
-}
+TEST_P(VP10HighbdTrans16x16HT, HighbdCoeffCheck) { RunBitexactCheck(); }
#endif // CONFIG_VP9_HIGHBITDEPTH
using std::tr1::make_tuple;
#if HAVE_SSE2
const Ht16x16Param kArrayHt16x16Param_sse2[] = {
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 0,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 1,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 2,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 3,
- VPX_BITS_8, 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8,
+ 256),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 4,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 5,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 6,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 7,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 8,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 10,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 11,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 12,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 13,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 14,
- VPX_BITS_8, 256),
- make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 15,
- VPX_BITS_8, 256)
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 4, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 5, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 6, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 7, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 8, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 10, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 11, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 12, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 13, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 14, VPX_BITS_8,
+ 256),
+ make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 15, VPX_BITS_8,
+ 256)
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE2, VP10Trans16x16HT,
- ::testing::ValuesIn(kArrayHt16x16Param_sse2));
+INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans16x16HT,
+ ::testing::ValuesIn(kArrayHt16x16Param_sse2));
#endif // HAVE_SSE2
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
const HighbdHt16x16Param kArrayHBDHt16x16Param_sse4_1[] = {
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 0, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 1, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 2, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 3, 12),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 12),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 10),
- make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 4, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 5, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 6, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 7, 12),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 10),
+ make_tuple(&vp10_fwd_txfm2d_16x16_sse4_1, 8, 12),
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE4_1, VP10HighbdTrans16x16HT,
- ::testing::ValuesIn(kArrayHBDHt16x16Param_sse4_1));
+INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans16x16HT,
+ ::testing::ValuesIn(kArrayHBDHt16x16Param_sse4_1));
#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/vp10_fht4x4_test.cc b/test/vp10_fht4x4_test.cc
index c5a4382..f1db663 100644
--- a/test/vp10_fht4x4_test.cc
+++ b/test/vp10_fht4x4_test.cc
@@ -29,8 +29,7 @@
using libvpx_test::FhtFunc;
typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht4x4Param;
-void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
- int tx_type) {
+void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp10_fht4x4_c(in, out, stride, tx_type);
}
@@ -44,23 +43,22 @@
// <Target optimized function, tx_type, bit depth>
typedef tuple<HBDFhtFunc, int, int> HighbdHt4x4Param;
-void highbe_fht4x4_ref(const int16_t *in, int32_t *out, int stride,
- int tx_type, int bd) {
+void highbe_fht4x4_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
+ int bd) {
vp10_fwd_txfm2d_4x4_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-class VP10Trans4x4HT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Ht4x4Param> {
+class VP10Trans4x4HT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Ht4x4Param> {
public:
virtual ~VP10Trans4x4HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 4;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 4;
fwd_txfm_ref = fht4x4_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -81,9 +79,7 @@
IhtFunc inv_txfm_;
};
-TEST_P(VP10Trans4x4HT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(VP10Trans4x4HT, CoeffCheck) { RunCoeffCheck(); }
#if CONFIG_VP9_HIGHBITDEPTH
class VP10HighbdTrans4x4HT : public ::testing::TestWithParam<HighbdHt4x4Param> {
@@ -93,7 +89,7 @@
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
fwd_txfm_ref_ = highbe_fht4x4_ref;
- tx_type_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(1);
bit_depth_ = GET_PARAM(2);
mask_ = (1 << bit_depth_) - 1;
num_coeffs_ = 16;
@@ -145,86 +141,66 @@
for (j = 0; j < num_coeffs; ++j) {
EXPECT_EQ(output_[j], output_ref_[j])
- << "Not bit-exact result at index: " << j
- << " at test block: " << i;
+ << "Not bit-exact result at index: " << j << " at test block: " << i;
}
}
}
-TEST_P(VP10HighbdTrans4x4HT, HighbdCoeffCheck) {
- RunBitexactCheck();
-}
+TEST_P(VP10HighbdTrans4x4HT, HighbdCoeffCheck) { RunBitexactCheck(); }
#endif // CONFIG_VP9_HIGHBITDEPTH
using std::tr1::make_tuple;
#if HAVE_SSE2
const Ht4x4Param kArrayHt4x4Param_sse2[] = {
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
- VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3, VPX_BITS_8, 16),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 4,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 5,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 6,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 7,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 8,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 10,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 11,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 12,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 13,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 14,
- VPX_BITS_8, 16),
- make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 15,
- VPX_BITS_8, 16)
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 4, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 5, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 6, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 7, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 8, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 10, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 11, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 12, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 13, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 14, VPX_BITS_8, 16),
+ make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 15, VPX_BITS_8, 16)
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE2, VP10Trans4x4HT,
- ::testing::ValuesIn(kArrayHt4x4Param_sse2));
+INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans4x4HT,
+ ::testing::ValuesIn(kArrayHt4x4Param_sse2));
#endif // HAVE_SSE2
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
const HighbdHt4x4Param kArrayHighbdHt4x4Param[] = {
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 0, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 1, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 2, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 3, 12),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 12),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 10),
- make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 4, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 5, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 6, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 7, 12),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 10),
+ make_tuple(&vp10_fwd_txfm2d_4x4_sse4_1, 8, 12),
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE4_1, VP10HighbdTrans4x4HT,
- ::testing::ValuesIn(kArrayHighbdHt4x4Param));
+INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans4x4HT,
+ ::testing::ValuesIn(kArrayHighbdHt4x4Param));
#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
diff --git a/test/vp10_fht8x8_test.cc b/test/vp10_fht8x8_test.cc
index da278c4..a711901 100644
--- a/test/vp10_fht8x8_test.cc
+++ b/test/vp10_fht8x8_test.cc
@@ -30,8 +30,7 @@
using std::tr1::tuple;
typedef tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t, int> Ht8x8Param;
-void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride,
- int tx_type) {
+void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
vp10_fht8x8_c(in, out, stride, tx_type);
}
@@ -43,23 +42,22 @@
// Target optimized function, tx_type, bit depth
typedef tuple<HbdHtFunc, int, int> HighbdHt8x8Param;
-void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride,
- int tx_type, int bd) {
+void highbd_fht8x8_ref(const int16_t *in, int32_t *out, int stride, int tx_type,
+ int bd) {
vp10_fwd_txfm2d_8x8_c(in, out, stride, tx_type, bd);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-class VP10Trans8x8HT
- : public libvpx_test::TransformTestBase,
- public ::testing::TestWithParam<Ht8x8Param> {
+class VP10Trans8x8HT : public libvpx_test::TransformTestBase,
+ public ::testing::TestWithParam<Ht8x8Param> {
public:
virtual ~VP10Trans8x8HT() {}
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
inv_txfm_ = GET_PARAM(1);
- tx_type_ = GET_PARAM(2);
- pitch_ = 8;
+ tx_type_ = GET_PARAM(2);
+ pitch_ = 8;
fwd_txfm_ref = fht8x8_ref;
bit_depth_ = GET_PARAM(3);
mask_ = (1 << bit_depth_) - 1;
@@ -80,9 +78,7 @@
IhtFunc inv_txfm_;
};
-TEST_P(VP10Trans8x8HT, CoeffCheck) {
- RunCoeffCheck();
-}
+TEST_P(VP10Trans8x8HT, CoeffCheck) { RunCoeffCheck(); }
#if CONFIG_VP9_HIGHBITDEPTH
class VP10HighbdTrans8x8HT : public ::testing::TestWithParam<HighbdHt8x8Param> {
@@ -92,7 +88,7 @@
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
fwd_txfm_ref_ = highbd_fht8x8_ref;
- tx_type_ = GET_PARAM(1);
+ tx_type_ = GET_PARAM(1);
bit_depth_ = GET_PARAM(2);
mask_ = (1 << bit_depth_) - 1;
num_coeffs_ = 64;
@@ -140,90 +136,70 @@
}
fwd_txfm_ref_(input_, output_ref_, stride, tx_type_, bit_depth_);
- ASM_REGISTER_STATE_CHECK(fwd_txfm_(input_, output_, stride, tx_type_,
- bit_depth_));
+ ASM_REGISTER_STATE_CHECK(
+ fwd_txfm_(input_, output_, stride, tx_type_, bit_depth_));
for (j = 0; j < num_coeffs; ++j) {
EXPECT_EQ(output_ref_[j], output_[j])
- << "Not bit-exact result at index: " << j
- << " at test block: " << i;
+ << "Not bit-exact result at index: " << j << " at test block: " << i;
}
}
}
-TEST_P(VP10HighbdTrans8x8HT, HighbdCoeffCheck) {
- RunBitexactCheck();
-}
+TEST_P(VP10HighbdTrans8x8HT, HighbdCoeffCheck) { RunBitexactCheck(); }
#endif // CONFIG_VP9_HIGHBITDEPTH
using std::tr1::make_tuple;
#if HAVE_SSE2
const Ht8x8Param kArrayHt8x8Param_sse2[] = {
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
- VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3, VPX_BITS_8, 64),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 4,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 5,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 6,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 7,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 8,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 10,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 11,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 12,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 13,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 14,
- VPX_BITS_8, 64),
- make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 15,
- VPX_BITS_8, 64)
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 4, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 5, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 6, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 7, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 8, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 10, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 11, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 12, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 13, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 14, VPX_BITS_8, 64),
+ make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 15, VPX_BITS_8, 64)
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE2, VP10Trans8x8HT,
- ::testing::ValuesIn(kArrayHt8x8Param_sse2));
+INSTANTIATE_TEST_CASE_P(SSE2, VP10Trans8x8HT,
+ ::testing::ValuesIn(kArrayHt8x8Param_sse2));
#endif // HAVE_SSE2
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
const HighbdHt8x8Param kArrayHBDHt8x8Param_sse4_1[] = {
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 0, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 1, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 2, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 3, 12),
#if CONFIG_EXT_TX
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 12),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 10),
- make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 4, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 5, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 6, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 7, 12),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 10),
+ make_tuple(&vp10_fwd_txfm2d_8x8_sse4_1, 8, 12),
#endif // CONFIG_EXT_TX
};
-INSTANTIATE_TEST_CASE_P(
- SSE4_1, VP10HighbdTrans8x8HT,
- ::testing::ValuesIn(kArrayHBDHt8x8Param_sse4_1));
+INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdTrans8x8HT,
+ ::testing::ValuesIn(kArrayHBDHt8x8Param_sse4_1));
#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/vp10_fwd_txfm1d_test.cc b/test/vp10_fwd_txfm1d_test.cc
index f8dc0b6..088924a 100644
--- a/test/vp10_fwd_txfm1d_test.cc
+++ b/test/vp10_fwd_txfm1d_test.cc
@@ -20,20 +20,20 @@
namespace {
const int txfm_type_num = 2;
-const TYPE_TXFM txfm_type_ls[2] = {TYPE_DCT, TYPE_ADST};
+const TYPE_TXFM txfm_type_ls[2] = { TYPE_DCT, TYPE_ADST };
const int txfm_size_num = 5;
-const int txfm_size_ls[5] = {4, 8, 16, 32, 64};
+const int txfm_size_ls[5] = { 4, 8, 16, 32, 64 };
const TxfmFunc fwd_txfm_func_ls[2][5] = {
- {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
- vp10_fdct64_new},
- {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new,
- NULL}};
+ { vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
+ vp10_fdct64_new },
+ { vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, NULL }
+};
// the maximum stage number of fwd/inv 1d dct/adst txfm is 12
-const int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14};
-const int8_t range_bit[12] = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32};
+const int8_t cos_bit[12] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 };
+const int8_t range_bit[12] = { 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32 };
TEST(vp10_fwd_txfm1d, round_shift) {
EXPECT_EQ(round_shift(7, 1), 4);
@@ -61,17 +61,17 @@
}
TEST(vp10_fwd_txfm1d, clamp_block) {
- int16_t block[5][5] = {{7, -5, 6, -3, 9},
- {7, -5, 6, -3, 9},
- {7, -5, 6, -3, 9},
- {7, -5, 6, -3, 9},
- {7, -5, 6, -3, 9}};
+ int16_t block[5][5] = { { 7, -5, 6, -3, 9 },
+ { 7, -5, 6, -3, 9 },
+ { 7, -5, 6, -3, 9 },
+ { 7, -5, 6, -3, 9 },
+ { 7, -5, 6, -3, 9 } };
- int16_t ref_block[5][5] = {{7, -5, 6, -3, 9},
- {7, -5, 6, -3, 9},
- {7, -4, 2, -3, 9},
- {7, -4, 2, -3, 9},
- {7, -4, 2, -3, 9}};
+ int16_t ref_block[5][5] = { { 7, -5, 6, -3, 9 },
+ { 7, -5, 6, -3, 9 },
+ { 7, -4, 2, -3, 9 },
+ { 7, -4, 2, -3, 9 },
+ { 7, -4, 2, -3, 9 } };
int row = 2;
int col = 1;
diff --git a/test/vp10_fwd_txfm2d_test.cc b/test/vp10_fwd_txfm2d_test.cc
index 953ae11..8051fb5 100644
--- a/test/vp10_fwd_txfm2d_test.cc
+++ b/test/vp10_fwd_txfm2d_test.cc
@@ -52,14 +52,14 @@
txfm1d_size_ = libvpx_test::get_txfm1d_size(tx_size_);
txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
get_txfm1d_type(tx_type_, &type0_, &type1_);
- input_ = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * txfm2d_size_));
- output_ = reinterpret_cast<int32_t *>
- (vpx_memalign(16, sizeof(int32_t) * txfm2d_size_));
- ref_input_ = reinterpret_cast<double *>
- (vpx_memalign(16, sizeof(double) * txfm2d_size_));
- ref_output_ = reinterpret_cast<double *>
- (vpx_memalign(16, sizeof(double) * txfm2d_size_));
+ input_ = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * txfm2d_size_));
+ output_ = reinterpret_cast<int32_t *>(
+ vpx_memalign(16, sizeof(int32_t) * txfm2d_size_));
+ ref_input_ = reinterpret_cast<double *>(
+ vpx_memalign(16, sizeof(double) * txfm2d_size_));
+ ref_output_ = reinterpret_cast<double *>(
+ vpx_memalign(16, sizeof(double) * txfm2d_size_));
}
void RunFwdAccuracyCheck() {
@@ -82,8 +82,8 @@
else if (ud_flip_)
libvpx_test::flipud(ref_input_, txfm1d_size_, txfm1d_size_);
- reference_hybrid_2d(ref_input_, ref_output_, txfm1d_size_,
- type0_, type1_);
+ reference_hybrid_2d(ref_input_, ref_output_, txfm1d_size_, type0_,
+ type1_);
for (int ni = 0; ni < txfm2d_size_; ++ni) {
ref_output_[ni] = round(ref_output_[ni] * amplify_factor_);
@@ -121,61 +121,58 @@
Fwd_Txfm2d_Func fwd_txfm_;
TYPE_TXFM type0_;
TYPE_TXFM type1_;
- int16_t* input_;
- int32_t* output_;
- double* ref_input_;
- double* ref_output_;
+ int16_t *input_;
+ int32_t *output_;
+ double *ref_input_;
+ double *ref_output_;
int ud_flip_; // flip upside down
int lr_flip_; // flip left to right
};
-TEST_P(VP10FwdTxfm2d, RunFwdAccuracyCheck) {
- RunFwdAccuracyCheck();
-}
+TEST_P(VP10FwdTxfm2d, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
const VP10FwdTxfm2dParam vp10_fwd_txfm2d_param_c[] = {
#if CONFIG_EXT_TX
- VP10FwdTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.2),
- VP10FwdTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(ADST_FLIPADST, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(FLIPADST_ADST, TX_4X4, 2, 0.2),
- VP10FwdTxfm2dParam(FLIPADST_DCT, TX_8X8, 5, 0.6),
- VP10FwdTxfm2dParam(DCT_FLIPADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_8X8, 5, 0.6),
VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_8X8, 5, 0.6),
VP10FwdTxfm2dParam(ADST_FLIPADST, TX_8X8, 5, 0.6),
VP10FwdTxfm2dParam(FLIPADST_ADST, TX_8X8, 5, 0.6),
- VP10FwdTxfm2dParam(FLIPADST_DCT, TX_16X16, 11, 1.5),
- VP10FwdTxfm2dParam(DCT_FLIPADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_16X16, 11, 1.5),
VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_16X16, 11, 1.5),
VP10FwdTxfm2dParam(ADST_FLIPADST, TX_16X16, 11, 1.5),
VP10FwdTxfm2dParam(FLIPADST_ADST, TX_16X16, 11, 1.5),
- VP10FwdTxfm2dParam(FLIPADST_DCT, TX_32X32, 70, 7),
- VP10FwdTxfm2dParam(DCT_FLIPADST, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(FLIPADST_DCT, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(DCT_FLIPADST, TX_32X32, 70, 7),
VP10FwdTxfm2dParam(FLIPADST_FLIPADST, TX_32X32, 70, 7),
VP10FwdTxfm2dParam(ADST_FLIPADST, TX_32X32, 70, 7),
VP10FwdTxfm2dParam(FLIPADST_ADST, TX_32X32, 70, 7),
#endif
- VP10FwdTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.2),
- VP10FwdTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.2),
- VP10FwdTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(DCT_DCT, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(ADST_DCT, TX_4X4, 2, 0.2),
+ VP10FwdTxfm2dParam(DCT_ADST, TX_4X4, 2, 0.2),
VP10FwdTxfm2dParam(ADST_ADST, TX_4X4, 2, 0.2),
- VP10FwdTxfm2dParam(DCT_DCT, TX_8X8, 5, 0.6),
- VP10FwdTxfm2dParam(ADST_DCT, TX_8X8, 5, 0.6),
- VP10FwdTxfm2dParam(DCT_ADST, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(DCT_DCT, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(ADST_DCT, TX_8X8, 5, 0.6),
+ VP10FwdTxfm2dParam(DCT_ADST, TX_8X8, 5, 0.6),
VP10FwdTxfm2dParam(ADST_ADST, TX_8X8, 5, 0.6),
- VP10FwdTxfm2dParam(DCT_DCT, TX_16X16, 11, 1.5),
- VP10FwdTxfm2dParam(ADST_DCT, TX_16X16, 11, 1.5),
- VP10FwdTxfm2dParam(DCT_ADST, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(DCT_DCT, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(ADST_DCT, TX_16X16, 11, 1.5),
+ VP10FwdTxfm2dParam(DCT_ADST, TX_16X16, 11, 1.5),
VP10FwdTxfm2dParam(ADST_ADST, TX_16X16, 11, 1.5),
- VP10FwdTxfm2dParam(DCT_DCT, TX_32X32, 70, 7),
- VP10FwdTxfm2dParam(ADST_DCT, TX_32X32, 70, 7),
- VP10FwdTxfm2dParam(DCT_ADST, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(DCT_DCT, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(ADST_DCT, TX_32X32, 70, 7),
+ VP10FwdTxfm2dParam(DCT_ADST, TX_32X32, 70, 7),
VP10FwdTxfm2dParam(ADST_ADST, TX_32X32, 70, 7)
};
-INSTANTIATE_TEST_CASE_P(
- C, VP10FwdTxfm2d,
- ::testing::ValuesIn(vp10_fwd_txfm2d_param_c));
+INSTANTIATE_TEST_CASE_P(C, VP10FwdTxfm2d,
+ ::testing::ValuesIn(vp10_fwd_txfm2d_param_c));
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/vp10_highbd_iht_test.cc b/test/vp10_highbd_iht_test.cc
index caab04c..b632b40 100644
--- a/test/vp10_highbd_iht_test.cc
+++ b/test/vp10_highbd_iht_test.cc
@@ -118,35 +118,32 @@
txfm_ref_(input_, coeffs_, stride, tx_type_, bit_depth_);
inv_txfm_ref_(coeffs_, output_ref_, stride, tx_type_, bit_depth_);
- ASM_REGISTER_STATE_CHECK(inv_txfm_(coeffs_, output_, stride, tx_type_,
- bit_depth_));
+ ASM_REGISTER_STATE_CHECK(
+ inv_txfm_(coeffs_, output_, stride, tx_type_, bit_depth_));
for (int j = 0; j < num_coeffs_; ++j) {
EXPECT_EQ(output_ref_[j], output_[j])
- << "Not bit-exact result at index: " << j
- << " At test block: " << i;
+ << "Not bit-exact result at index: " << j << " At test block: " << i;
}
}
}
-TEST_P(VP10HighbdInvHTNxN, InvTransResultCheck) {
- RunBitexactCheck();
-}
+TEST_P(VP10HighbdInvHTNxN, InvTransResultCheck) { RunBitexactCheck(); }
using std::tr1::make_tuple;
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
-#define PARAM_LIST_4X4 &vp10_fwd_txfm2d_4x4_c, \
- &vp10_inv_txfm2d_add_4x4_sse4_1, \
- &vp10_inv_txfm2d_add_4x4_c, 16
+#define PARAM_LIST_4X4 \
+ &vp10_fwd_txfm2d_4x4_c, &vp10_inv_txfm2d_add_4x4_sse4_1, \
+ &vp10_inv_txfm2d_add_4x4_c, 16
-#define PARAM_LIST_8X8 &vp10_fwd_txfm2d_8x8_c, \
- &vp10_inv_txfm2d_add_8x8_sse4_1, \
- &vp10_inv_txfm2d_add_8x8_c, 64
+#define PARAM_LIST_8X8 \
+ &vp10_fwd_txfm2d_8x8_c, &vp10_inv_txfm2d_add_8x8_sse4_1, \
+ &vp10_inv_txfm2d_add_8x8_c, 64
-#define PARAM_LIST_16X16 &vp10_fwd_txfm2d_16x16_c, \
- &vp10_inv_txfm2d_add_16x16_sse4_1, \
- &vp10_inv_txfm2d_add_16x16_c, 256
+#define PARAM_LIST_16X16 \
+ &vp10_fwd_txfm2d_16x16_c, &vp10_inv_txfm2d_add_16x16_sse4_1, \
+ &vp10_inv_txfm2d_add_16x16_c, 256
const IHbdHtParam kArrayIhtParam[] = {
// 16x16
@@ -214,9 +211,8 @@
#endif
};
-INSTANTIATE_TEST_CASE_P(
- SSE4_1, VP10HighbdInvHTNxN,
- ::testing::ValuesIn(kArrayIhtParam));
+INSTANTIATE_TEST_CASE_P(SSE4_1, VP10HighbdInvHTNxN,
+ ::testing::ValuesIn(kArrayIhtParam));
#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
} // namespace
diff --git a/test/vp10_inv_txfm1d_test.cc b/test/vp10_inv_txfm1d_test.cc
index c024f2c..829eaa2 100644
--- a/test/vp10_inv_txfm1d_test.cc
+++ b/test/vp10_inv_txfm1d_test.cc
@@ -18,23 +18,23 @@
namespace {
const int txfm_type_num = 2;
const int txfm_size_num = 5;
-const int txfm_size_ls[5] = {4, 8, 16, 32, 64};
+const int txfm_size_ls[5] = { 4, 8, 16, 32, 64 };
const TxfmFunc fwd_txfm_func_ls[2][5] = {
- {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
- vp10_fdct64_new},
- {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new,
- NULL}};
+ { vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new,
+ vp10_fdct64_new },
+ { vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, NULL }
+};
const TxfmFunc inv_txfm_func_ls[2][5] = {
- {vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new,
- vp10_idct64_new},
- {vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new,
- NULL}};
+ { vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new,
+ vp10_idct64_new },
+ { vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new, NULL }
+};
// the maximum stage number of fwd/inv 1d dct/adst txfm is 12
-const int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14};
-const int8_t range_bit[12] = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32};
+const int8_t cos_bit[12] = { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14 };
+const int8_t range_bit[12] = { 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32 };
TEST(vp10_inv_txfm1d, round_trip) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
diff --git a/test/vp10_inv_txfm2d_test.cc b/test/vp10_inv_txfm2d_test.cc
index 8f55f55..8cbca5a 100644
--- a/test/vp10_inv_txfm2d_test.cc
+++ b/test/vp10_inv_txfm2d_test.cc
@@ -43,12 +43,12 @@
txfm2d_size_ = txfm1d_size_ * txfm1d_size_;
count_ = 500;
- input_ = reinterpret_cast<int16_t *>
- (vpx_memalign(16, sizeof(int16_t) * txfm2d_size_));
- ref_input_ = reinterpret_cast<uint16_t *>
- (vpx_memalign(16, sizeof(uint16_t) * txfm2d_size_));
- output_ = reinterpret_cast<int32_t *>
- (vpx_memalign(16, sizeof(int32_t) * txfm2d_size_));
+ input_ = reinterpret_cast<int16_t *>(
+ vpx_memalign(16, sizeof(int16_t) * txfm2d_size_));
+ ref_input_ = reinterpret_cast<uint16_t *>(
+ vpx_memalign(16, sizeof(uint16_t) * txfm2d_size_));
+ output_ = reinterpret_cast<int32_t *>(
+ vpx_memalign(16, sizeof(int32_t) * txfm2d_size_));
}
void RunRoundtripCheck() {
@@ -101,9 +101,9 @@
TX_SIZE tx_size_;
int txfm1d_size_;
int txfm2d_size_;
- int16_t* input_;
- uint16_t* ref_input_;
- int32_t* output_;
+ int16_t *input_;
+ uint16_t *ref_input_;
+ int32_t *output_;
};
TEST_P(VP10InvTxfm2d, RunRoundtripCheck) { RunRoundtripCheck(); }
@@ -149,9 +149,8 @@
VP10InvTxfm2dParam(ADST_ADST, TX_32X32, 4, 0.4)
};
-INSTANTIATE_TEST_CASE_P(
- C, VP10InvTxfm2d,
- ::testing::ValuesIn(vp10_inv_txfm2d_param));
+INSTANTIATE_TEST_CASE_P(C, VP10InvTxfm2d,
+ ::testing::ValuesIn(vp10_inv_txfm2d_param));
#endif // CONFIG_VP9_HIGHBITDEPTH
diff --git a/test/vp10_inv_txfm_test.cc b/test/vp10_inv_txfm_test.cc
index 917e6e9..3469d198 100644
--- a/test/vp10_inv_txfm_test.cc
+++ b/test/vp10_inv_txfm_test.cc
@@ -51,14 +51,14 @@
protected:
void RunInvAccuracyCheck() {
- tran_low_t *input = new tran_low_t[txfm_size_];
+ tran_low_t *input = new tran_low_t[txfm_size_];
tran_low_t *output = new tran_low_t[txfm_size_];
- double *ref_input = new double[txfm_size_];
+ double *ref_input = new double[txfm_size_];
double *ref_output = new double[txfm_size_];
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
- for (int ti = 0; ti < count_test_block; ++ti) {
+ for (int ti = 0; ti < count_test_block; ++ti) {
for (int ni = 0; ni < txfm_size_; ++ni) {
input[ni] = rnd.Rand8() - rnd.Rand8();
ref_input[ni] = static_cast<double>(input[ni]);
@@ -87,9 +87,8 @@
};
typedef std::tr1::tuple<IdctFunc, IdctFuncRef, int, int> IdctParam;
-class Vp10InvTxfm
- : public TransTestBase,
- public ::testing::TestWithParam<IdctParam> {
+class Vp10InvTxfm : public TransTestBase,
+ public ::testing::TestWithParam<IdctParam> {
public:
virtual void SetUp() {
fwd_txfm_ = GET_PARAM(0);
@@ -100,25 +99,19 @@
virtual void TearDown() {}
};
-TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) {
- RunInvAccuracyCheck();
-}
+TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
INSTANTIATE_TEST_CASE_P(
C, Vp10InvTxfm,
- ::testing::Values(
- IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
- IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
- IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
- IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6))
-);
+ ::testing::Values(IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
+ IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
+ IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
+ IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)));
typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc,
- InvTxfmFunc,
- InvTxfmFunc,
- TX_SIZE, int> PartialInvTxfmParam;
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+ PartialInvTxfmParam;
const int kMaxNumCoeffs = 1024;
class Vp10PartialIDctTest
: public ::testing::TestWithParam<PartialInvTxfmParam> {
@@ -128,7 +121,7 @@
ftxfm_ = GET_PARAM(0);
full_itxfm_ = GET_PARAM(1);
partial_itxfm_ = GET_PARAM(2);
- tx_size_ = GET_PARAM(3);
+ tx_size_ = GET_PARAM(3);
last_nonzero_ = GET_PARAM(4);
}
@@ -146,21 +139,11 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
- case TX_4X4:
- size = 4;
- break;
- case TX_8X8:
- size = 8;
- break;
- case TX_16X16:
- size = 16;
- break;
- case TX_32X32:
- size = 32;
- break;
- default:
- FAIL() << "Wrong Size!";
- break;
+ case TX_4X4: size = 4; break;
+ case TX_8X8: size = 8; break;
+ case TX_16X16: size = 16; break;
+ case TX_32X32: size = 32; break;
+ default: FAIL() << "Wrong Size!"; break;
}
DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -186,11 +169,9 @@
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-255, 255].
if (i == 0) {
- for (int j = 0; j < block_size; ++j)
- input_extreme_block[j] = 255;
+ for (int j = 0; j < block_size; ++j) input_extreme_block[j] = 255;
} else if (i == 1) {
- for (int j = 0; j < block_size; ++j)
- input_extreme_block[j] = -255;
+ for (int j = 0; j < block_size; ++j) input_extreme_block[j] = -255;
} else {
for (int j = 0; j < block_size; ++j) {
input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
@@ -202,8 +183,8 @@
// quantization with maximum allowed step sizes
test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
for (int j = 1; j < last_nonzero_; ++j)
- test_coef_block1[get_scan(tx_size_, DCT_DCT, 0)->scan[j]]
- = (output_ref_block[j] / 1828) * 1828;
+ test_coef_block1[get_scan(tx_size_, DCT_DCT, 0)->scan[j]] =
+ (output_ref_block[j] / 1828) * 1828;
}
ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size));
@@ -212,8 +193,7 @@
for (int j = 0; j < block_size; ++j) {
const int diff = dst1[j] - dst2[j];
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
}
}
@@ -225,21 +205,11 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
int size;
switch (tx_size_) {
- case TX_4X4:
- size = 4;
- break;
- case TX_8X8:
- size = 8;
- break;
- case TX_16X16:
- size = 16;
- break;
- case TX_32X32:
- size = 32;
- break;
- default:
- FAIL() << "Wrong Size!";
- break;
+ case TX_4X4: size = 4; break;
+ case TX_8X8: size = 8; break;
+ case TX_16X16: size = 16; break;
+ case TX_32X32: size = 32; break;
+ default: FAIL() << "Wrong Size!"; break;
}
DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]);
@@ -276,8 +246,7 @@
for (int j = 0; j < block_size; ++j) {
const int diff = dst1[j] - dst2[j];
const int error = diff * diff;
- if (max_error < error)
- max_error = error;
+ if (max_error < error) max_error = error;
}
}
@@ -288,33 +257,18 @@
INSTANTIATE_TEST_CASE_P(
C, Vp10PartialIDctTest,
- ::testing::Values(
- make_tuple(&vp10_fdct32x32_c,
- &vp10_idct32x32_1024_add_c,
- &vp10_idct32x32_34_add_c,
- TX_32X32, 34),
- make_tuple(&vp10_fdct32x32_c,
- &vp10_idct32x32_1024_add_c,
- &vp10_idct32x32_1_add_c,
- TX_32X32, 1),
- make_tuple(&vp10_fdct16x16_c,
- &vp10_idct16x16_256_add_c,
- &vp10_idct16x16_10_add_c,
- TX_16X16, 10),
- make_tuple(&vp10_fdct16x16_c,
- &vp10_idct16x16_256_add_c,
- &vp10_idct16x16_1_add_c,
- TX_16X16, 1),
- make_tuple(&vp10_fdct8x8_c,
- &vp10_idct8x8_64_add_c,
- &vp10_idct8x8_12_add_c,
- TX_8X8, 12),
- make_tuple(&vp10_fdct8x8_c,
- &vp10_idct8x8_64_add_c,
- &vp10_idct8x8_1_add_c,
- TX_8X8, 1),
- make_tuple(&vp10_fdct4x4_c,
- &vp10_idct4x4_16_add_c,
- &vp10_idct4x4_1_add_c,
- TX_4X4, 1)));
+ ::testing::Values(make_tuple(&vp10_fdct32x32_c, &vp10_idct32x32_1024_add_c,
+ &vp10_idct32x32_34_add_c, TX_32X32, 34),
+ make_tuple(&vp10_fdct32x32_c, &vp10_idct32x32_1024_add_c,
+ &vp10_idct32x32_1_add_c, TX_32X32, 1),
+ make_tuple(&vp10_fdct16x16_c, &vp10_idct16x16_256_add_c,
+ &vp10_idct16x16_10_add_c, TX_16X16, 10),
+ make_tuple(&vp10_fdct16x16_c, &vp10_idct16x16_256_add_c,
+ &vp10_idct16x16_1_add_c, TX_16X16, 1),
+ make_tuple(&vp10_fdct8x8_c, &vp10_idct8x8_64_add_c,
+ &vp10_idct8x8_12_add_c, TX_8X8, 12),
+ make_tuple(&vp10_fdct8x8_c, &vp10_idct8x8_64_add_c,
+ &vp10_idct8x8_1_add_c, TX_8X8, 1),
+ make_tuple(&vp10_fdct4x4_c, &vp10_idct4x4_16_add_c,
+ &vp10_idct4x4_1_add_c, TX_4X4, 1)));
} // namespace
diff --git a/test/vp10_quantize_test.cc b/test/vp10_quantize_test.cc
index f8bbb25..e7365bb 100644
--- a/test/vp10_quantize_test.cc
+++ b/test/vp10_quantize_test.cc
@@ -21,20 +21,17 @@
namespace {
-typedef void (*QuantizeFpFunc)(const tran_low_t *coeff_ptr, intptr_t count,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan,
- const int log_scale);
+typedef void (*QuantizeFpFunc)(
+ const tran_low_t *coeff_ptr, intptr_t count, int skip_block,
+ const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan, const int log_scale);
struct QuantizeFuncParams {
QuantizeFuncParams(QuantizeFpFunc qF = NULL, QuantizeFpFunc qRefF = NULL,
- int count = 16) : qFunc(qF), qFuncRef(qRefF),
- coeffCount(count) {}
+ int count = 16)
+ : qFunc(qF), qFuncRef(qRefF), coeffCount(count) {}
QuantizeFpFunc qFunc;
QuantizeFpFunc qFuncRef;
int coeffCount;
@@ -90,28 +87,25 @@
round_ptr[j] = (abs(rnd(roundFactorRange)) * dequant_ptr[j]) >> 7;
}
- quanFuncRef(coeff_ptr, count, skip_block, zbin_ptr,
- round_ptr, quant_ptr, quant_shift_ptr,
- ref_qcoeff_ptr, ref_dqcoeff_ptr, dequant_ptr,
- &ref_eob, scanOrder.scan, scanOrder.iscan,
- log_scale);
+ quanFuncRef(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, ref_qcoeff_ptr, ref_dqcoeff_ptr, dequant_ptr,
+ &ref_eob, scanOrder.scan, scanOrder.iscan, log_scale);
- ASM_REGISTER_STATE_CHECK(quanFunc(coeff_ptr, count, skip_block, zbin_ptr,
- round_ptr, quant_ptr, quant_shift_ptr,
- qcoeff_ptr, dqcoeff_ptr, dequant_ptr,
- &eob, scanOrder.scan, scanOrder.iscan,
- log_scale));
+ ASM_REGISTER_STATE_CHECK(
+ quanFunc(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, &eob,
+ scanOrder.scan, scanOrder.iscan, log_scale));
for (int j = 0; j < count; ++j) {
- err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
- (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
- EXPECT_EQ(ref_qcoeff_ptr[j], qcoeff_ptr[j])
- << "qcoeff error: i = " << i << " j = " << j << "\n";
+ err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
+ (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]);
+ EXPECT_EQ(ref_qcoeff_ptr[j], qcoeff_ptr[j]) << "qcoeff error: i = " << i
+ << " j = " << j << "\n";
EXPECT_EQ(ref_dqcoeff_ptr[j], dqcoeff_ptr[j])
<< "dqcoeff error: i = " << i << " j = " << j << "\n";
}
- EXPECT_EQ(ref_eob, eob)
- << "eob error: " << "i = " << i << "\n";
+ EXPECT_EQ(ref_eob, eob) << "eob error: "
+ << "i = " << i << "\n";
err_count += (ref_eob != eob);
if (err_count && !err_count_total) {
first_failure = i;
@@ -164,29 +158,22 @@
round_ptr[j] = (abs(rnd(roundFactorRange)) * dequant_ptr[j]) >> 7;
}
- quanFuncRef(coeff_ptr, count, skip_block, zbin_ptr,
- round_ptr, quant_ptr, quant_shift_ptr,
- ref_qcoeff_ptr, ref_dqcoeff_ptr, dequant_ptr,
- &ref_eob, scanOrder.scan, scanOrder.iscan,
- log_scale);
+ quanFuncRef(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, ref_qcoeff_ptr, ref_dqcoeff_ptr, dequant_ptr,
+ &ref_eob, scanOrder.scan, scanOrder.iscan, log_scale);
- ASM_REGISTER_STATE_CHECK(quanFunc(coeff_ptr, count, skip_block, zbin_ptr,
- round_ptr, quant_ptr, quant_shift_ptr,
- qcoeff_ptr, dqcoeff_ptr, dequant_ptr,
- &eob, scanOrder.scan, scanOrder.iscan,
- log_scale));
- EXPECT_EQ(ref_eob, eob)
- << "eob error: " << "i = " << i << "\n";
+ ASM_REGISTER_STATE_CHECK(
+ quanFunc(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr,
+ quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, &eob,
+ scanOrder.scan, scanOrder.iscan, log_scale));
+ EXPECT_EQ(ref_eob, eob) << "eob error: "
+ << "i = " << i << "\n";
}
}
- virtual void SetUp() {
- params_ = GetParam();
- }
+ virtual void SetUp() { params_ = GetParam(); }
- virtual void TearDown() {
- libvpx_test::ClearSystemState();
- }
+ virtual void TearDown() { libvpx_test::ClearSystemState(); }
virtual ~VP10QuantizeTest() {}
@@ -208,12 +195,8 @@
QuantizeFuncParams params_;
};
-TEST_P(VP10QuantizeTest, BitExactCheck) {
- RunQuantizeTest();
-}
-TEST_P(VP10QuantizeTest, EobVerify) {
- RunEobTest();
-}
+TEST_P(VP10QuantizeTest, BitExactCheck) { RunQuantizeTest(); }
+TEST_P(VP10QuantizeTest, EobVerify) { RunEobTest(); }
#if HAVE_SSE4_1
INSTANTIATE_TEST_CASE_P(
diff --git a/test/vp10_txfm_test.cc b/test/vp10_txfm_test.cc
index 6b36126..f9055f1 100644
--- a/test/vp10_txfm_test.cc
+++ b/test/vp10_txfm_test.cc
@@ -13,12 +13,9 @@
namespace libvpx_test {
-int get_txfm1d_size(TX_SIZE tx_size) {
- return 1 << (tx_size + 2);
-}
+int get_txfm1d_size(TX_SIZE tx_size) { return 1 << (tx_size + 2); }
-void get_txfm1d_type(TX_TYPE txfm2d_type, TYPE_TXFM* type0,
- TYPE_TXFM* type1) {
+void get_txfm1d_type(TX_TYPE txfm2d_type, TYPE_TXFM *type0, TYPE_TXFM *type1) {
switch (txfm2d_type) {
case DCT_DCT:
*type0 = TYPE_DCT;
@@ -68,7 +65,7 @@
double invSqrt2 = 1 / pow(2, 0.5);
-void reference_dct_1d(const double* in, double* out, int size) {
+void reference_dct_1d(const double *in, double *out, int size) {
for (int k = 0; k < size; ++k) {
out[k] = 0;
for (int n = 0; n < size; ++n) {
@@ -78,7 +75,7 @@
}
}
-void reference_adst_1d(const double* in, double* out, int size) {
+void reference_adst_1d(const double *in, double *out, int size) {
for (int k = 0; k < size; ++k) {
out[k] = 0;
for (int n = 0; n < size; ++n) {
@@ -87,16 +84,16 @@
}
}
-void reference_hybrid_1d(double* in, double* out, int size, int type) {
+void reference_hybrid_1d(double *in, double *out, int size, int type) {
if (type == TYPE_DCT)
reference_dct_1d(in, out, size);
else
reference_adst_1d(in, out, size);
}
-void reference_hybrid_2d(double* in, double* out, int size,
- int type0, int type1) {
- double* tempOut = new double[size * size];
+void reference_hybrid_2d(double *in, double *out, int size, int type0,
+ int type1) {
+ double *tempOut = new double[size * size];
for (int r = 0; r < size; r++) {
// out ->tempOut
@@ -123,7 +120,7 @@
delete[] tempOut;
}
-template<typename Type>
+template <typename Type>
void fliplr(Type *dest, int stride, int length) {
int i, j;
for (i = 0; i < length; ++i) {
@@ -135,7 +132,7 @@
}
}
-template<typename Type>
+template <typename Type>
void flipud(Type *dest, int stride, int length) {
int i, j;
for (j = 0; j < length; ++j) {
@@ -147,7 +144,7 @@
}
}
-template<typename Type>
+template <typename Type>
void fliplrud(Type *dest, int stride, int length) {
int i, j;
for (i = 0; i < length / 2; ++i) {
diff --git a/test/vp10_txfm_test.h b/test/vp10_txfm_test.h
index fb9e12e..4b7d978 100644
--- a/test/vp10_txfm_test.h
+++ b/test/vp10_txfm_test.h
@@ -36,19 +36,18 @@
int get_txfm1d_size(TX_SIZE tx_size);
-void get_txfm1d_type(TX_TYPE txfm2d_type, TYPE_TXFM* type0,
- TYPE_TXFM* type1);
+void get_txfm1d_type(TX_TYPE txfm2d_type, TYPE_TXFM *type0, TYPE_TXFM *type1);
-void reference_dct_1d(const double* in, double* out, int size);
+void reference_dct_1d(const double *in, double *out, int size);
-void reference_adst_1d(const double* in, double* out, int size);
+void reference_adst_1d(const double *in, double *out, int size);
-void reference_hybrid_1d(double* in, double* out, int size, int type);
+void reference_hybrid_1d(double *in, double *out, int size, int type);
-void reference_hybrid_2d(double* in, double* out, int size,
- int type0, int type1);
+void reference_hybrid_2d(double *in, double *out, int size, int type0,
+ int type1);
template <typename Type1, typename Type2>
-static double compute_avg_abs_error(const Type1* a, const Type2* b,
+static double compute_avg_abs_error(const Type1 *a, const Type2 *b,
const int size) {
double error = 0;
for (int i = 0; i < size; i++) {
@@ -58,32 +57,34 @@
return error;
}
-template<typename Type>
+template <typename Type>
void fliplr(Type *dest, int stride, int length);
-template<typename Type>
+template <typename Type>
void flipud(Type *dest, int stride, int length);
-template<typename Type>
+template <typename Type>
void fliplrud(Type *dest, int stride, int length);
-typedef void (*TxfmFunc)(const int32_t* in, int32_t* out, const int8_t* cos_bit,
- const int8_t* range_bit);
+typedef void (*TxfmFunc)(const int32_t *in, int32_t *out, const int8_t *cos_bit,
+ const int8_t *range_bit);
-typedef void (*Fwd_Txfm2d_Func)(const int16_t*, int32_t*, int, int, int);
-typedef void (*Inv_Txfm2d_Func)(const int32_t*, uint16_t*, int, int, int);
+typedef void (*Fwd_Txfm2d_Func)(const int16_t *, int32_t *, int, int, int);
+typedef void (*Inv_Txfm2d_Func)(const int32_t *, uint16_t *, int, int, int);
static const int bd = 10;
static const int input_base = (1 << bd);
#if CONFIG_VP9_HIGHBITDEPTH
static const Fwd_Txfm2d_Func fwd_txfm_func_ls[TX_SIZES] = {
- vp10_fwd_txfm2d_4x4_c, vp10_fwd_txfm2d_8x8_c, vp10_fwd_txfm2d_16x16_c,
- vp10_fwd_txfm2d_32x32_c};
+ vp10_fwd_txfm2d_4x4_c, vp10_fwd_txfm2d_8x8_c, vp10_fwd_txfm2d_16x16_c,
+ vp10_fwd_txfm2d_32x32_c
+};
static const Inv_Txfm2d_Func inv_txfm_func_ls[TX_SIZES] = {
- vp10_inv_txfm2d_add_4x4_c, vp10_inv_txfm2d_add_8x8_c,
- vp10_inv_txfm2d_add_16x16_c, vp10_inv_txfm2d_add_32x32_c};
+ vp10_inv_txfm2d_add_4x4_c, vp10_inv_txfm2d_add_8x8_c,
+ vp10_inv_txfm2d_add_16x16_c, vp10_inv_txfm2d_add_32x32_c
+};
#endif // CONFIG_VP9_HIGHBITDEPTH
} // namespace libvpx_test
diff --git a/test/vp10_wedge_utils_test.cc b/test/vp10_wedge_utils_test.cc
index 9fa4849..57008c6 100644
--- a/test/vp10_wedge_utils_test.cc
+++ b/test/vp10_wedge_utils_test.cc
@@ -24,7 +24,7 @@
#include "test/register_state_check.h"
#define WEDGE_WEIGHT_BITS 6
-#define MAX_MASK_VALUE (1 << (WEDGE_WEIGHT_BITS))
+#define MAX_MASK_VALUE (1 << (WEDGE_WEIGHT_BITS))
using libvpx_test::ACMRandom;
using libvpx_test::FunctionEquivalenceTest;
@@ -46,12 +46,9 @@
ACMRandom rng_;
};
-static void equiv_blend_residuals(int16_t *r,
- const int16_t *r0,
- const int16_t *r1,
- const uint8_t *m,
- int N) {
- for (int i = 0 ; i < N ; i++) {
+static void equiv_blend_residuals(int16_t *r, const int16_t *r0,
+ const int16_t *r1, const uint8_t *m, int N) {
+ for (int i = 0; i < N; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int16_t R = m0 * r0[i] + m1 * r1[i];
@@ -62,12 +59,10 @@
}
}
-static uint64_t equiv_sse_from_residuals(const int16_t *r0,
- const int16_t *r1,
- const uint8_t *m,
- int N) {
+static uint64_t equiv_sse_from_residuals(const int16_t *r0, const int16_t *r1,
+ const uint8_t *m, int N) {
uint64_t acc = 0;
- for (int i = 0 ; i < N ; i++) {
+ for (int i = 0; i < N; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int16_t R = m0 * r0[i] + m1 * r1[i];
@@ -89,8 +84,8 @@
DECLARE_ALIGNED(32, int16_t, r_tst[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
s[i] = rng_.Rand8();
m[i] = rng_(MAX_MASK_VALUE + 1);
}
@@ -99,7 +94,7 @@
const int h = 1 << (rng_(MAX_SB_SIZE_LOG2 + 1 - 3) + 3);
const int N = w * h;
- for (int j = 0 ; j < N ; j++) {
+ for (int j = 0; j < N; j++) {
p0[j] = clamp(s[j] + rng_(33) - 16, 0, UINT8_MAX);
p1[j] = clamp(s[j] + rng_(33) - 16, 0, UINT8_MAX);
}
@@ -112,8 +107,7 @@
vpx_subtract_block(h, w, r_ref, w, s, w, p, w);
equiv_blend_residuals(r_tst, r0, r1, m, N);
- for (int i = 0 ; i < N ; ++i)
- ASSERT_EQ(r_ref[i], r_tst[i]);
+ for (int i = 0; i < N; ++i) ASSERT_EQ(r_ref[i], r_tst[i]);
uint64_t ref_sse = vpx_sum_squares_i16(r_ref, N);
uint64_t tst_sse = equiv_sse_from_residuals(r0, r1, m, N);
@@ -122,12 +116,10 @@
}
}
-static uint64_t sse_from_residuals(const int16_t *r0,
- const int16_t *r1,
- const uint8_t *m,
- int N) {
+static uint64_t sse_from_residuals(const int16_t *r0, const int16_t *r1,
+ const uint8_t *m, int N) {
uint64_t acc = 0;
- for (int i = 0 ; i < N ; i++) {
+ for (int i = 0; i < N; i++) {
const int32_t m0 = m[i];
const int32_t m1 = MAX_MASK_VALUE - m0;
const int32_t r = m0 * r0[i] + m1 * r1[i];
@@ -142,17 +134,16 @@
DECLARE_ALIGNED(32, int16_t, d[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
r1[i] = rng_(2 * INT8_MAX - 2 * INT8_MIN + 1) + 2 * INT8_MIN;
d[i] = rng_(2 * INT8_MAX - 2 * INT8_MIN + 1) + 2 * INT8_MIN;
m[i] = rng_(MAX_MASK_VALUE + 1);
}
- const int N = 64 * (rng_(MAX_SB_SQUARE/64) + 1);
+ const int N = 64 * (rng_(MAX_SB_SQUARE / 64) + 1);
- for (int i = 0 ; i < N ; i++)
- r0[i] = r1[i] + d[i];
+ for (int i = 0; i < N; i++) r0[i] = r1[i] + d[i];
const uint64_t ref_res = sse_from_residuals(r0, r1, m, N);
const uint64_t tst_res = vp10_wedge_sse_from_residuals(r1, d, m, N);
@@ -165,9 +156,7 @@
// vp10_wedge_sse_from_residuals - optimizations
//////////////////////////////////////////////////////////////////////////////
-typedef uint64_t (*FSSE)(const int16_t *r1,
- const int16_t *d,
- const uint8_t *m,
+typedef uint64_t (*FSSE)(const int16_t *r1, const int16_t *d, const uint8_t *m,
int N);
typedef libvpx_test::FuncParam<FSSE> TestFuncsFSSE;
@@ -181,14 +170,14 @@
DECLARE_ALIGNED(32, int16_t, d[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
r1[i] = rng_(2 * kInt13Max + 1) - kInt13Max;
d[i] = rng_(2 * kInt13Max + 1) - kInt13Max;
m[i] = rng_(MAX_MASK_VALUE + 1);
}
- const int N = 64 * (rng_(MAX_SB_SQUARE/64) + 1);
+ const int N = 64 * (rng_(MAX_SB_SQUARE / 64) + 1);
const uint64_t ref_res = params_.ref_func(r1, d, m, N);
uint64_t tst_res;
@@ -203,27 +192,22 @@
DECLARE_ALIGNED(32, int16_t, d[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
if (rng_(2)) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- r1[i] = kInt13Max;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) r1[i] = kInt13Max;
} else {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- r1[i] = -kInt13Max;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) r1[i] = -kInt13Max;
}
if (rng_(2)) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- d[i] = kInt13Max;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) d[i] = kInt13Max;
} else {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- d[i] = -kInt13Max;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) d[i] = -kInt13Max;
}
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- m[i] = MAX_MASK_VALUE;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) m[i] = MAX_MASK_VALUE;
- const int N = 64 * (rng_(MAX_SB_SQUARE/64) + 1);
+ const int N = 64 * (rng_(MAX_SB_SQUARE / 64) + 1);
const uint64_t ref_res = params_.ref_func(r1, d, m, N);
uint64_t tst_res;
@@ -236,9 +220,8 @@
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsSSEOptTest,
- ::testing::Values(
- TestFuncsFSSE(vp10_wedge_sse_from_residuals_c,
- vp10_wedge_sse_from_residuals_sse2)));
+ ::testing::Values(TestFuncsFSSE(vp10_wedge_sse_from_residuals_c,
+ vp10_wedge_sse_from_residuals_sse2)));
#endif // HAVE_SSE2
@@ -246,10 +229,7 @@
// vp10_wedge_sign_from_residuals
//////////////////////////////////////////////////////////////////////////////
-typedef int (*FSign)(const int16_t *ds,
- const uint8_t *m,
- int N,
- int64_t limit);
+typedef int (*FSign)(const int16_t *ds, const uint8_t *m, int N, int64_t limit);
typedef libvpx_test::FuncParam<FSign> TestFuncsFSign;
class WedgeUtilsSignOptTest : public FunctionEquivalenceTest<FSign> {
@@ -264,23 +244,23 @@
DECLARE_ALIGNED(32, int16_t, ds[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
r0[i] = rng_(2 * kInt13Max + 1) - kInt13Max;
r1[i] = rng_(2 * kInt13Max + 1) - kInt13Max;
m[i] = rng_(MAX_MASK_VALUE + 1);
}
const int maxN = VPXMIN(kMaxSize, MAX_SB_SQUARE);
- const int N = 64 * (rng_(maxN/64 - 1) + 1);
+ const int N = 64 * (rng_(maxN / 64 - 1) + 1);
int64_t limit;
limit = (int64_t)vpx_sum_squares_i16(r0, N);
limit -= (int64_t)vpx_sum_squares_i16(r1, N);
limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
- for (int i = 0 ; i < N ; i++)
- ds[i] = clamp(r0[i]*r0[i] - r1[i]*r1[i], INT16_MIN, INT16_MAX);
+ for (int i = 0; i < N; i++)
+ ds[i] = clamp(r0[i] * r0[i] - r1[i] * r1[i], INT16_MIN, INT16_MAX);
const int ref_res = params_.ref_func(ds, m, N, limit);
int tst_res;
@@ -296,47 +276,46 @@
DECLARE_ALIGNED(32, int16_t, ds[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, uint8_t, m[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
switch (rng_(4)) {
- case 0:
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
- r0[i] = 0;
- r1[i] = kInt13Max;
- }
- break;
- case 1:
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
- r0[i] = kInt13Max;
- r1[i] = 0;
- }
- break;
- case 2:
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
- r0[i] = 0;
- r1[i] = -kInt13Max;
- }
- break;
- default:
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
- r0[i] = -kInt13Max;
- r1[i] = 0;
- }
- break;
+ case 0:
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
+ r0[i] = 0;
+ r1[i] = kInt13Max;
+ }
+ break;
+ case 1:
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
+ r0[i] = kInt13Max;
+ r1[i] = 0;
+ }
+ break;
+ case 2:
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
+ r0[i] = 0;
+ r1[i] = -kInt13Max;
+ }
+ break;
+ default:
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
+ r0[i] = -kInt13Max;
+ r1[i] = 0;
+ }
+ break;
}
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- m[i] = MAX_MASK_VALUE;
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) m[i] = MAX_MASK_VALUE;
const int maxN = VPXMIN(kMaxSize, MAX_SB_SQUARE);
- const int N = 64 * (rng_(maxN/64 - 1) + 1);
+ const int N = 64 * (rng_(maxN / 64 - 1) + 1);
int64_t limit;
limit = (int64_t)vpx_sum_squares_i16(r0, N);
limit -= (int64_t)vpx_sum_squares_i16(r1, N);
limit *= (1 << WEDGE_WEIGHT_BITS) / 2;
- for (int i = 0 ; i < N ; i++)
- ds[i] = clamp(r0[i]*r0[i] - r1[i]*r1[i], INT16_MIN, INT16_MAX);
+ for (int i = 0; i < N; i++)
+ ds[i] = clamp(r0[i] * r0[i] - r1[i] * r1[i], INT16_MIN, INT16_MAX);
const int ref_res = params_.ref_func(ds, m, N, limit);
int tst_res;
@@ -350,9 +329,8 @@
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsSignOptTest,
- ::testing::Values(
- TestFuncsFSign(vp10_wedge_sign_from_residuals_c,
- vp10_wedge_sign_from_residuals_sse2)));
+ ::testing::Values(TestFuncsFSign(vp10_wedge_sign_from_residuals_c,
+ vp10_wedge_sign_from_residuals_sse2)));
#endif // HAVE_SSE2
@@ -360,10 +338,7 @@
// vp10_wedge_compute_delta_squares
//////////////////////////////////////////////////////////////////////////////
-typedef void (*FDS)(int16_t *d,
- const int16_t *a,
- const int16_t *b,
- int N);
+typedef void (*FDS)(int16_t *d, const int16_t *a, const int16_t *b, int N);
typedef libvpx_test::FuncParam<FDS> TestFuncsFDS;
class WedgeUtilsDeltaSquaresOptTest : public FunctionEquivalenceTest<FDS> {
@@ -377,13 +352,13 @@
DECLARE_ALIGNED(32, int16_t, d_ref[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int16_t, d_tst[MAX_SB_SQUARE]);
- for (int iter = 0 ; iter < kIterations && !HasFatalFailure(); ++iter) {
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i) {
+ for (int iter = 0; iter < kIterations && !HasFatalFailure(); ++iter) {
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) {
a[i] = rng_.Rand16();
b[i] = rng_(2 * INT16_MAX + 1) - INT16_MAX;
}
- const int N = 64 * (rng_(MAX_SB_SQUARE/64) + 1);
+ const int N = 64 * (rng_(MAX_SB_SQUARE / 64) + 1);
memset(&d_ref, INT16_MAX, sizeof(d_ref));
memset(&d_tst, INT16_MAX, sizeof(d_tst));
@@ -391,8 +366,7 @@
params_.ref_func(d_ref, a, b, N);
ASM_REGISTER_STATE_CHECK(params_.tst_func(d_tst, a, b, N));
- for (int i = 0 ; i < MAX_SB_SQUARE ; ++i)
- ASSERT_EQ(d_ref[i], d_tst[i]);
+ for (int i = 0; i < MAX_SB_SQUARE; ++i) ASSERT_EQ(d_ref[i], d_tst[i]);
}
}
@@ -400,9 +374,8 @@
INSTANTIATE_TEST_CASE_P(
SSE2, WedgeUtilsDeltaSquaresOptTest,
- ::testing::Values(
- TestFuncsFDS(vp10_wedge_compute_delta_squares_c,
- vp10_wedge_compute_delta_squares_sse2)));
+ ::testing::Values(TestFuncsFDS(vp10_wedge_compute_delta_squares_c,
+ vp10_wedge_compute_delta_squares_sse2)));
#endif // HAVE_SSE2
diff --git a/test/webm_video_source.h b/test/webm_video_source.h
index 8258756..5371361 100644
--- a/test/webm_video_source.h
+++ b/test/webm_video_source.h
@@ -25,30 +25,23 @@
class WebMVideoSource : public CompressedVideoSource {
public:
explicit WebMVideoSource(const std::string &file_name)
- : file_name_(file_name),
- vpx_ctx_(new VpxInputContext()),
- webm_ctx_(new WebmInputContext()),
- buf_(NULL),
- buf_sz_(0),
- frame_(0),
- end_of_file_(false) {
- }
+ : file_name_(file_name), vpx_ctx_(new VpxInputContext()),
+ webm_ctx_(new WebmInputContext()), buf_(NULL), buf_sz_(0), frame_(0),
+ end_of_file_(false) {}
virtual ~WebMVideoSource() {
- if (vpx_ctx_->file != NULL)
- fclose(vpx_ctx_->file);
+ if (vpx_ctx_->file != NULL) fclose(vpx_ctx_->file);
webm_free(webm_ctx_);
delete vpx_ctx_;
delete webm_ctx_;
}
- virtual void Init() {
- }
+ virtual void Init() {}
virtual void Begin() {
vpx_ctx_->file = OpenTestDataFile(file_name_);
ASSERT_TRUE(vpx_ctx_->file != NULL) << "Input file open failed. Filename: "
- << file_name_;
+ << file_name_;
ASSERT_EQ(file_is_webm(webm_ctx_, vpx_ctx_), 1) << "file is not WebM";
@@ -81,9 +74,7 @@
} while (!webm_ctx_->is_key_frame && !end_of_file_);
}
- virtual const uint8_t *cxdata() const {
- return end_of_file_ ? NULL : buf_;
- }
+ virtual const uint8_t *cxdata() const { return end_of_file_ ? NULL : buf_; }
virtual size_t frame_size() const { return buf_sz_; }
virtual unsigned int frame_number() const { return frame_; }
diff --git a/test/y4m_test.cc b/test/y4m_test.cc
index a555329..000bab6 100644
--- a/test/y4m_test.cc
+++ b/test/y4m_test.cc
@@ -22,7 +22,7 @@
using std::string;
-static const unsigned int kWidth = 160;
+static const unsigned int kWidth = 160;
static const unsigned int kHeight = 90;
static const unsigned int kFrames = 10;
@@ -34,24 +34,24 @@
};
const Y4mTestParam kY4mTestVectors[] = {
- {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420,
- "e5406275b9fc6bb3436c31d4a05c1cab"},
- {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422,
- "284a47a47133b12884ec3a14e959a0b6"},
- {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444,
- "90517ff33843d85de712fd4fe60dbed0"},
- {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016,
- "63f21f9f717d8b8631bd2288ee87137b"},
- {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216,
- "48ab51fb540aed07f7ff5af130c9b605"},
- {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416,
- "067bfd75aa85ff9bae91fa3e0edd1e3e"},
- {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016,
- "9e6d8f6508c6e55625f6b697bc461cef"},
- {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216,
- "b239c6b301c0b835485be349ca83a7e3"},
- {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416,
- "5a6481a550821dab6d0192f5c63845e9"},
+ { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420,
+ "e5406275b9fc6bb3436c31d4a05c1cab" },
+ { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422,
+ "284a47a47133b12884ec3a14e959a0b6" },
+ { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444,
+ "90517ff33843d85de712fd4fe60dbed0" },
+ { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016,
+ "63f21f9f717d8b8631bd2288ee87137b" },
+ { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216,
+ "48ab51fb540aed07f7ff5af130c9b605" },
+ { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416,
+ "067bfd75aa85ff9bae91fa3e0edd1e3e" },
+ { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016,
+ "9e6d8f6508c6e55625f6b697bc461cef" },
+ { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216,
+ "b239c6b301c0b835485be349ca83a7e3" },
+ { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416,
+ "5a6481a550821dab6d0192f5c63845e9" },
};
static void write_image_file(const vpx_image_t *img, FILE *file) {
@@ -60,10 +60,12 @@
const unsigned char *buf = img->planes[plane];
const int stride = img->stride[plane];
const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1;
- const int h = (plane ? (img->d_h + img->y_chroma_shift) >>
- img->y_chroma_shift : img->d_h);
- const int w = (plane ? (img->d_w + img->x_chroma_shift) >>
- img->x_chroma_shift : img->d_w);
+ const int h =
+ (plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift
+ : img->d_h);
+ const int w =
+ (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift
+ : img->d_w);
for (y = 0; y < h; ++y) {
fwrite(buf, bytes_per_sample, w, file);
buf += stride;
@@ -71,15 +73,12 @@
}
}
-class Y4mVideoSourceTest
- : public ::testing::TestWithParam<Y4mTestParam>,
- public ::libvpx_test::Y4mVideoSource {
+class Y4mVideoSourceTest : public ::testing::TestWithParam<Y4mTestParam>,
+ public ::libvpx_test::Y4mVideoSource {
protected:
Y4mVideoSourceTest() : Y4mVideoSource("", 0, 0) {}
- virtual ~Y4mVideoSourceTest() {
- CloseSource();
- }
+ virtual ~Y4mVideoSourceTest() { CloseSource(); }
virtual void Init(const std::string &file_name, int limit) {
file_name_ = file_name;
@@ -137,8 +136,7 @@
INSTANTIATE_TEST_CASE_P(C, Y4mVideoSourceTest,
::testing::ValuesIn(kY4mTestVectors));
-class Y4mVideoWriteTest
- : public Y4mVideoSourceTest {
+class Y4mVideoWriteTest : public Y4mVideoSourceTest {
protected:
Y4mVideoWriteTest() {}
@@ -158,14 +156,12 @@
// Writes out a y4m file and then reads it back
void WriteY4mAndReadBack() {
ASSERT_TRUE(input_file_ != NULL);
- char buf[Y4M_BUFFER_SIZE] = {0};
- const struct VpxRational framerate = {y4m_.fps_n, y4m_.fps_d};
+ char buf[Y4M_BUFFER_SIZE] = { 0 };
+ const struct VpxRational framerate = { y4m_.fps_n, y4m_.fps_d };
tmpfile_ = new libvpx_test::TempOutFile;
ASSERT_TRUE(tmpfile_->file() != NULL);
- y4m_write_file_header(buf, sizeof(buf),
- kWidth, kHeight,
- &framerate, y4m_.vpx_fmt,
- y4m_.bit_depth);
+ y4m_write_file_header(buf, sizeof(buf), kWidth, kHeight, &framerate,
+ y4m_.vpx_fmt, y4m_.bit_depth);
fputs(buf, tmpfile_->file());
for (unsigned int i = start_; i < limit_; i++) {
y4m_write_frame_header(buf, sizeof(buf));
diff --git a/test/y4m_video_source.h b/test/y4m_video_source.h
index 03d9388..2682ddd 100644
--- a/test/y4m_video_source.h
+++ b/test/y4m_video_source.h
@@ -21,18 +21,10 @@
// so that we can do actual file encodes.
class Y4mVideoSource : public VideoSource {
public:
- Y4mVideoSource(const std::string &file_name,
- unsigned int start, int limit)
- : file_name_(file_name),
- input_file_(NULL),
- img_(new vpx_image_t()),
- start_(start),
- limit_(limit),
- frame_(0),
- framerate_numerator_(0),
- framerate_denominator_(0),
- y4m_() {
- }
+ Y4mVideoSource(const std::string &file_name, unsigned int start, int limit)
+ : file_name_(file_name), input_file_(NULL), img_(new vpx_image_t()),
+ start_(start), limit_(limit), frame_(0), framerate_numerator_(0),
+ framerate_denominator_(0), y4m_() {}
virtual ~Y4mVideoSource() {
vpx_img_free(img_.get());
diff --git a/test/yuv_video_source.h b/test/yuv_video_source.h
index 3c852b2..2cc81a0 100644
--- a/test/yuv_video_source.h
+++ b/test/yuv_video_source.h
@@ -25,19 +25,11 @@
class YUVVideoSource : public VideoSource {
public:
YUVVideoSource(const std::string &file_name, vpx_img_fmt format,
- unsigned int width, unsigned int height,
- int rate_numerator, int rate_denominator,
- unsigned int start, int limit)
- : file_name_(file_name),
- input_file_(NULL),
- img_(NULL),
- start_(start),
- limit_(limit),
- frame_(0),
- width_(0),
- height_(0),
- format_(VPX_IMG_FMT_NONE),
- framerate_numerator_(rate_numerator),
+ unsigned int width, unsigned int height, int rate_numerator,
+ int rate_denominator, unsigned int start, int limit)
+ : file_name_(file_name), input_file_(NULL), img_(NULL), start_(start),
+ limit_(limit), frame_(0), width_(0), height_(0),
+ format_(VPX_IMG_FMT_NONE), framerate_numerator_(rate_numerator),
framerate_denominator_(rate_denominator) {
// This initializes format_, raw_size_, width_, height_ and allocates img.
SetSize(width, height, format);
@@ -45,13 +37,11 @@
virtual ~YUVVideoSource() {
vpx_img_free(img_);
- if (input_file_)
- fclose(input_file_);
+ if (input_file_) fclose(input_file_);
}
virtual void Begin() {
- if (input_file_)
- fclose(input_file_);
+ if (input_file_) fclose(input_file_);
input_file_ = OpenTestDataFile(file_name_);
ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: "
<< file_name_;
@@ -67,7 +57,7 @@
FillFrame();
}
- virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
+ virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; }
// Models a stream where Timebase = 1/FPS, so pts == frame.
virtual vpx_codec_pts_t pts() const { return frame_; }
@@ -93,32 +83,15 @@
height_ = height;
format_ = format;
switch (format) {
- case VPX_IMG_FMT_I420:
- raw_size_ = width * height * 3 / 2;
- break;
- case VPX_IMG_FMT_I422:
- raw_size_ = width * height * 2;
- break;
- case VPX_IMG_FMT_I440:
- raw_size_ = width * height * 2;
- break;
- case VPX_IMG_FMT_I444:
- raw_size_ = width * height * 3;
- break;
- case VPX_IMG_FMT_I42016:
- raw_size_ = width * height * 3;
- break;
- case VPX_IMG_FMT_I42216:
- raw_size_ = width * height * 4;
- break;
- case VPX_IMG_FMT_I44016:
- raw_size_ = width * height * 4;
- break;
- case VPX_IMG_FMT_I44416:
- raw_size_ = width * height * 6;
- break;
- default:
- ASSERT_TRUE(0);
+ case VPX_IMG_FMT_I420: raw_size_ = width * height * 3 / 2; break;
+ case VPX_IMG_FMT_I422: raw_size_ = width * height * 2; break;
+ case VPX_IMG_FMT_I440: raw_size_ = width * height * 2; break;
+ case VPX_IMG_FMT_I444: raw_size_ = width * height * 3; break;
+ case VPX_IMG_FMT_I42016: raw_size_ = width * height * 3; break;
+ case VPX_IMG_FMT_I42216: raw_size_ = width * height * 4; break;
+ case VPX_IMG_FMT_I44016: raw_size_ = width * height * 4; break;
+ case VPX_IMG_FMT_I44416: raw_size_ = width * height * 6; break;
+ default: ASSERT_TRUE(0);
}
}
}