test/: apply clang-format
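
The formatting below can likely be reproduced with an invocation along these
lines, assuming the tree's .clang-format settings are picked up; the exact
command and clang-format version are assumptions, not recorded in this change:

    clang-format -i test/lpf_8_test.cc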

Change-Id: I1138fbeff5f63beb5c0de2c357793da12502d453
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index b14352c..8aaab23 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -36,8 +36,7 @@
 
 #if CONFIG_VP9_HIGHBITDEPTH
 typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
-                          const uint8_t *limit, const uint8_t *thresh,
-                          int bd);
+                          const uint8_t *limit, const uint8_t *thresh, int bd);
 typedef void (*dual_loop_op_t)(uint16_t *s, int p, const uint8_t *blimit0,
                                const uint8_t *limit0, const uint8_t *thresh0,
                                const uint8_t *blimit1, const uint8_t *limit1,
@@ -108,21 +107,18 @@
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
-    int32_t p = kNumCoeffs/32;
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
+    int32_t p = kNumCoeffs / 32;
 
     uint16_t tmp_s[kNumCoeffs];
     int j = 0;
@@ -158,7 +154,7 @@
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
 #else
-    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh);
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -206,20 +202,17 @@
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                   tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;
     for (int j = 0; j < kNumCoeffs; ++j) {
       s[j] = rnd.Rand16() & mask_;
@@ -230,7 +223,7 @@
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bd));
 #else
-    ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh);
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -256,43 +249,37 @@
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
-  DECLARE_ALIGNED(8,  uint8_t,  s[kNumCoeffs]);
-  DECLARE_ALIGNED(8,  uint8_t,  ref_s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit0[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit1[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;
     uint16_t tmp_s[kNumCoeffs];
     int j = 0;
@@ -325,17 +312,16 @@
       ref_s[j] = s[j];
     }
 #if CONFIG_VP9_HIGHBITDEPTH
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1, bd);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1,
+                                            bd));
 #else
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -358,43 +344,37 @@
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
-  DECLARE_ALIGNED(8,  uint8_t, s[kNumCoeffs]);
-  DECLARE_ALIGNED(8,  uint8_t, ref_s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
+  DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
     int err_count = 0;
     uint8_t tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit0[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(3 * MAX_LOOP_FILTER + 4));
-    DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = static_cast<uint8_t>(rnd(MAX_LOOP_FILTER));
-    DECLARE_ALIGNED(16, const uint8_t, limit1[16])  = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                    tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     tmp = rnd.Rand8();
-    DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = {
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
-        tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp
-    };
+    DECLARE_ALIGNED(16, const uint8_t,
+                    thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp,
+                                     tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp };
     int32_t p = kNumCoeffs / 32;  // TODO(pdlf) can we have non-square here?
     for (int j = 0; j < kNumCoeffs; ++j) {
       s[j] = rnd.Rand16() & mask_;
@@ -402,17 +382,16 @@
     }
 #if CONFIG_VP9_HIGHBITDEPTH
     const int32_t bd = bit_depth_;
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1, bd);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
-                       thresh0, blimit1, limit1, thresh1, bd));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1, bd);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1,
+                                            bd));
 #else
-    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1);
-    ASM_REGISTER_STATE_CHECK(
-        loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0,
-                       blimit1, limit1, thresh1));
+    ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
+                       limit1, thresh1);
+    ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
+                                            thresh0, blimit1, limit1, thresh1));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -434,129 +413,120 @@
 #if CONFIG_VP9_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
-    ::testing::Values(
-        make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                   &vpx_highbd_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                   &vpx_highbd_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                   &vpx_highbd_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_8_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                   &vpx_highbd_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                   &vpx_highbd_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                   &vpx_highbd_lpf_horizontal_4_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                   &vpx_highbd_lpf_vertical_4_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                   &vpx_highbd_lpf_horizontal_8_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_8_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_16_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                   &vpx_highbd_lpf_vertical_8_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                   &vpx_highbd_lpf_vertical_16_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
-                   &vpx_highbd_lpf_horizontal_4_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
-                   &vpx_highbd_lpf_vertical_4_c, 12),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
-                   &vpx_highbd_lpf_horizontal_8_c, 12),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_8_c, 12),
-        make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
-                   &vpx_highbd_lpf_horizontal_edge_16_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
-                   &vpx_highbd_lpf_vertical_8_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
-                   &vpx_highbd_lpf_vertical_16_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                   &vpx_highbd_lpf_vertical_16_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                   &vpx_highbd_lpf_vertical_16_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
-                   &vpx_highbd_lpf_vertical_16_dual_c, 12)));
+    ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+                                 &vpx_highbd_lpf_vertical_4_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_8_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_16_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+                                 &vpx_highbd_lpf_vertical_8_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+                                 &vpx_highbd_lpf_vertical_16_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+                                 &vpx_highbd_lpf_vertical_4_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_8_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_16_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+                                 &vpx_highbd_lpf_vertical_8_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+                                 &vpx_highbd_lpf_vertical_16_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_sse2,
+                                 &vpx_highbd_lpf_vertical_4_c, 12),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_c, 12),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_8_c, 12),
+                      make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2,
+                                 &vpx_highbd_lpf_horizontal_edge_16_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_sse2,
+                                 &vpx_highbd_lpf_vertical_8_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_sse2,
+                                 &vpx_highbd_lpf_vertical_16_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_16_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_16_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_16_dual_c, 12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_sse2,
-                   &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_sse2,
-                   &vpx_lpf_horizontal_8_c, 8),
+        make_tuple(&vpx_lpf_horizontal_4_sse2, &vpx_lpf_horizontal_4_c, 8),
+        make_tuple(&vpx_lpf_horizontal_8_sse2, &vpx_lpf_horizontal_8_c, 8),
         make_tuple(&vpx_lpf_horizontal_edge_8_sse2,
                    &vpx_lpf_horizontal_edge_8_c, 8),
         make_tuple(&vpx_lpf_horizontal_edge_16_sse2,
                    &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_sse2,
-                   &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_sse2,
-                   &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_sse2,
-                   &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_sse2,
-                   &vpx_lpf_vertical_16_dual_c, 8)));
+        make_tuple(&vpx_lpf_vertical_4_sse2, &vpx_lpf_vertical_4_c, 8),
+        make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_sse2, &vpx_lpf_vertical_16_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_dual_sse2, &vpx_lpf_vertical_16_dual_c,
+                   8)));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif
 
 #if HAVE_AVX2 && (!CONFIG_VP9_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     AVX2, Loop8Test6Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_edge_8_avx2,
-                   &vpx_lpf_horizontal_edge_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_16_avx2,
-                   &vpx_lpf_horizontal_edge_16_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_edge_8_avx2,
+                                 &vpx_lpf_horizontal_edge_8_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_edge_16_avx2,
+                                 &vpx_lpf_horizontal_edge_16_c, 8)));
 #endif
 
 #if HAVE_SSE2
 #if CONFIG_VP9_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 8),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 10),
-        make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_4_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
-                   &vpx_highbd_lpf_horizontal_8_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
-                   &vpx_highbd_lpf_vertical_4_dual_c, 12),
-        make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
-                   &vpx_highbd_lpf_vertical_8_dual_c, 12)));
+    ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 8),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 10),
+                      make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_4_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2,
+                                 &vpx_highbd_lpf_horizontal_8_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_4_dual_c, 12),
+                      make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2,
+                                 &vpx_highbd_lpf_vertical_8_dual_c, 12)));
 #else
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_sse2,
-                   &vpx_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_sse2,
-                   &vpx_lpf_vertical_8_dual_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_sse2,
+                                 &vpx_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_8_dual_sse2,
+                                 &vpx_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_4_dual_sse2,
+                                 &vpx_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_8_dual_sse2,
+                                 &vpx_lpf_vertical_8_dual_c, 8)));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif
 
@@ -568,38 +538,32 @@
     NEON, Loop8Test6Param,
     ::testing::Values(
 #if HAVE_NEON_ASM
-// Using #if inside the macro is unsupported on MSVS but the tests are not
-// currently built for MSVS with ARM and NEON.
+        // Using #if inside the macro is unsupported on MSVS but the tests
+        // are not currently built for MSVS with ARM and NEON.
         make_tuple(&vpx_lpf_horizontal_edge_8_neon,
                    &vpx_lpf_horizontal_edge_8_c, 8),
         make_tuple(&vpx_lpf_horizontal_edge_16_neon,
                    &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_neon,
-                   &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_neon,
-                   &vpx_lpf_vertical_16_dual_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_neon, &vpx_lpf_vertical_16_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_dual_neon, &vpx_lpf_vertical_16_dual_c,
+                   8),
 #endif  // HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_8_neon,
-                   &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_neon,
-                   &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_4_neon,
-                   &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_neon,
-                   &vpx_lpf_vertical_4_c, 8)));
-INSTANTIATE_TEST_CASE_P(
-    NEON, Loop8Test9Param,
-    ::testing::Values(
+        make_tuple(&vpx_lpf_horizontal_8_neon, &vpx_lpf_horizontal_8_c, 8),
+        make_tuple(&vpx_lpf_vertical_8_neon, &vpx_lpf_vertical_8_c, 8),
+        make_tuple(&vpx_lpf_horizontal_4_neon, &vpx_lpf_horizontal_4_c, 8),
+        make_tuple(&vpx_lpf_vertical_4_neon, &vpx_lpf_vertical_4_c, 8)));
+INSTANTIATE_TEST_CASE_P(NEON, Loop8Test9Param,
+                        ::testing::Values(
 #if HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_8_dual_neon,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_neon,
-                   &vpx_lpf_vertical_8_dual_c, 8),
+                            make_tuple(&vpx_lpf_horizontal_8_dual_neon,
+                                       &vpx_lpf_horizontal_8_dual_c, 8),
+                            make_tuple(&vpx_lpf_vertical_8_dual_neon,
+                                       &vpx_lpf_vertical_8_dual_c, 8),
 #endif  // HAVE_NEON_ASM
-        make_tuple(&vpx_lpf_horizontal_4_dual_neon,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_neon,
-                   &vpx_lpf_vertical_4_dual_c, 8)));
+                            make_tuple(&vpx_lpf_horizontal_4_dual_neon,
+                                       &vpx_lpf_horizontal_4_dual_c, 8),
+                            make_tuple(&vpx_lpf_vertical_4_dual_neon,
+                                       &vpx_lpf_vertical_4_dual_c, 8)));
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // HAVE_NEON
 
@@ -607,66 +571,52 @@
 INSTANTIATE_TEST_CASE_P(
     DSPR2, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dspr2,
-                   &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dspr2,
-                   &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_8,
-                   &vpx_lpf_horizontal_edge_8, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_16,
-                   &vpx_lpf_horizontal_edge_16, 8),
-        make_tuple(&vpx_lpf_vertical_4_dspr2,
-                   &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dspr2,
-                   &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dspr2,
-                   &vpx_lpf_vertical_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_dual_dspr2,
-                   &vpx_lpf_vertical_16_dual_c, 8)));
+        make_tuple(&vpx_lpf_horizontal_4_dspr2, &vpx_lpf_horizontal_4_c, 8),
+        make_tuple(&vpx_lpf_horizontal_8_dspr2, &vpx_lpf_horizontal_8_c, 8),
+        make_tuple(&vpx_lpf_horizontal_edge_8, &vpx_lpf_horizontal_edge_8, 8),
+        make_tuple(&vpx_lpf_horizontal_edge_16, &vpx_lpf_horizontal_edge_16, 8),
+        make_tuple(&vpx_lpf_vertical_4_dspr2, &vpx_lpf_vertical_4_c, 8),
+        make_tuple(&vpx_lpf_vertical_8_dspr2, &vpx_lpf_vertical_8_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_dspr2, &vpx_lpf_vertical_16_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_dual_dspr2, &vpx_lpf_vertical_16_dual_c,
+                   8)));
 
 INSTANTIATE_TEST_CASE_P(
     DSPR2, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dual_dspr2,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dual_dspr2,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_dspr2,
-                   &vpx_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_dspr2,
-                   &vpx_lpf_vertical_8_dual_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_dspr2,
+                                 &vpx_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_8_dual_dspr2,
+                                 &vpx_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_4_dual_dspr2,
+                                 &vpx_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_8_dual_dspr2,
+                                 &vpx_lpf_vertical_8_dual_c, 8)));
 #endif  // HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
 
 #if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test6Param,
     ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_msa,
-                   &vpx_lpf_horizontal_4_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_msa,
-                   &vpx_lpf_horizontal_8_c, 8),
-        make_tuple(&vpx_lpf_horizontal_edge_8_msa,
-                   &vpx_lpf_horizontal_edge_8_c, 8),
+        make_tuple(&vpx_lpf_horizontal_4_msa, &vpx_lpf_horizontal_4_c, 8),
+        make_tuple(&vpx_lpf_horizontal_8_msa, &vpx_lpf_horizontal_8_c, 8),
+        make_tuple(&vpx_lpf_horizontal_edge_8_msa, &vpx_lpf_horizontal_edge_8_c,
+                   8),
         make_tuple(&vpx_lpf_horizontal_edge_16_msa,
                    &vpx_lpf_horizontal_edge_16_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_msa,
-                   &vpx_lpf_vertical_4_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_msa,
-                   &vpx_lpf_vertical_8_c, 8),
-        make_tuple(&vpx_lpf_vertical_16_msa,
-                   &vpx_lpf_vertical_16_c, 8)));
+        make_tuple(&vpx_lpf_vertical_4_msa, &vpx_lpf_vertical_4_c, 8),
+        make_tuple(&vpx_lpf_vertical_8_msa, &vpx_lpf_vertical_8_c, 8),
+        make_tuple(&vpx_lpf_vertical_16_msa, &vpx_lpf_vertical_16_c, 8)));
 
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test9Param,
-    ::testing::Values(
-        make_tuple(&vpx_lpf_horizontal_4_dual_msa,
-                   &vpx_lpf_horizontal_4_dual_c, 8),
-        make_tuple(&vpx_lpf_horizontal_8_dual_msa,
-                   &vpx_lpf_horizontal_8_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_4_dual_msa,
-                   &vpx_lpf_vertical_4_dual_c, 8),
-        make_tuple(&vpx_lpf_vertical_8_dual_msa,
-                   &vpx_lpf_vertical_8_dual_c, 8)));
+    ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_msa,
+                                 &vpx_lpf_horizontal_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_horizontal_8_dual_msa,
+                                 &vpx_lpf_horizontal_8_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_4_dual_msa,
+                                 &vpx_lpf_vertical_4_dual_c, 8),
+                      make_tuple(&vpx_lpf_vertical_8_dual_msa,
+                                 &vpx_lpf_vertical_8_dual_c, 8)));
 #endif  // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH)
 
 }  // namespace