Rename aom_hadamard_8x8_dual() to aom_hadamard_lp_8x8_dual()

The existing aom_hadamard_8x8_dual() is specific to the
low-bitdepth path, so this patch renames it to
aom_hadamard_lp_8x8_dual(), matching the lp naming used by
the other low-bitdepth Hadamard kernels.
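
The lp kernels write int16_t coefficients (the non-lp Hadamard
variants write tran_low_t), and the dual kernel transforms two
horizontally adjacent 8x8 blocks per call, as in the C version
below. A minimal usage sketch, assuming the generated RTCD header;
the function and buffer names in the sketch are illustrative:

    #include "config/aom_dsp_rtcd.h"

    static void hadamard_lp_dual_example(void) {
      // Two horizontally adjacent 8x8 residual blocks, stride 16.
      int16_t src_diff[8 * 16] = { 0 };
      int16_t coeff[2 * 64];  // 64 coefficients per 8x8 block
      // Equivalent to aom_hadamard_lp_8x8() on columns 0-7
      // (-> coeff[0..63]) and on columns 8-15 (-> coeff[64..127]).
      aom_hadamard_lp_8x8_dual(src_diff, /*src_stride=*/16, coeff);
    }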

Change-Id: I1c6d2062d47af775c00987bfbe05feb85b64767c
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 821f33c..4503c8e 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -1226,8 +1226,8 @@
   add_proto qw/void aom_hadamard_lp_16x16/, "const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff";
   specialize qw/aom_hadamard_lp_16x16 sse2 avx2 neon/;
 
-  add_proto qw/void aom_hadamard_8x8_dual/, "const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff";
-  specialize qw/aom_hadamard_8x8_dual sse2 avx2 neon/;
+  add_proto qw/void aom_hadamard_lp_8x8_dual/, "const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff";
+  specialize qw/aom_hadamard_lp_8x8_dual sse2 avx2 neon/;
 
   add_proto qw/void aom_pixel_scale/, "const int16_t *src_diff, ptrdiff_t src_stride, int16_t *coeff, int log_scale, int h8, int w8";
   specialize qw/aom_pixel_scale sse2/;
diff --git a/aom_dsp/arm/hadamard_neon.c b/aom_dsp/arm/hadamard_neon.c
index 336f53f..939c9a6 100644
--- a/aom_dsp/arm/hadamard_neon.c
+++ b/aom_dsp/arm/hadamard_neon.c
@@ -104,8 +104,8 @@
   vst1q_s16(coeff + 56, a7);
 }
 
-void aom_hadamard_8x8_dual_neon(const int16_t *src_diff, ptrdiff_t src_stride,
-                                int16_t *coeff) {
+void aom_hadamard_lp_8x8_dual_neon(const int16_t *src_diff,
+                                   ptrdiff_t src_stride, int16_t *coeff) {
   for (int i = 0; i < 2; i++) {
     aom_hadamard_lp_8x8_neon(src_diff + (i * 8), src_stride, coeff + (i * 64));
   }
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index eb0f0ab..1e48bc1 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -228,8 +228,8 @@
   for (int idx = 0; idx < 64; ++idx) coeff[idx] = buffer2[idx];
 }
 
-void aom_hadamard_8x8_dual_c(const int16_t *src_diff, ptrdiff_t src_stride,
-                             int16_t *coeff) {
+void aom_hadamard_lp_8x8_dual_c(const int16_t *src_diff, ptrdiff_t src_stride,
+                                int16_t *coeff) {
   for (int i = 0; i < 2; i++) {
     aom_hadamard_lp_8x8_c(src_diff + (i * 8), src_stride,
                           (int16_t *)coeff + (i * 64));
diff --git a/aom_dsp/x86/avg_intrin_avx2.c b/aom_dsp/x86/avg_intrin_avx2.c
index cfadd1f..6c8db3a 100644
--- a/aom_dsp/x86/avg_intrin_avx2.c
+++ b/aom_dsp/x86/avg_intrin_avx2.c
@@ -92,8 +92,8 @@
   }
 }
 
-void aom_hadamard_8x8_dual_avx2(const int16_t *src_diff, ptrdiff_t src_stride,
-                                int16_t *coeff) {
+void aom_hadamard_lp_8x8_dual_avx2(const int16_t *src_diff,
+                                   ptrdiff_t src_stride, int16_t *coeff) {
   __m256i src[8];
   src[0] = _mm256_loadu_si256((const __m256i *)src_diff);
   src[1] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
@@ -141,7 +141,8 @@
   int idx;
   for (idx = 0; idx < 2; ++idx) {
     const int16_t *src_ptr = src_diff + idx * 8 * src_stride;
-    aom_hadamard_8x8_dual_avx2(src_ptr, src_stride, t_coeff + (idx * 64 * 2));
+    aom_hadamard_lp_8x8_dual_avx2(src_ptr, src_stride,
+                                  t_coeff + (idx * 64 * 2));
   }
 
   for (idx = 0; idx < 64; idx += 16) {
@@ -186,7 +187,8 @@
   int16_t *t_coeff = coeff;
   for (int idx = 0; idx < 2; ++idx) {
     const int16_t *src_ptr = src_diff + idx * 8 * src_stride;
-    aom_hadamard_8x8_dual_avx2(src_ptr, src_stride, t_coeff + (idx * 64 * 2));
+    aom_hadamard_lp_8x8_dual_avx2(src_ptr, src_stride,
+                                  t_coeff + (idx * 64 * 2));
   }
 
   for (int idx = 0; idx < 64; idx += 16) {
diff --git a/aom_dsp/x86/avg_intrin_sse2.c b/aom_dsp/x86/avg_intrin_sse2.c
index 5d79d50..bdbd1f6 100644
--- a/aom_dsp/x86/avg_intrin_sse2.c
+++ b/aom_dsp/x86/avg_intrin_sse2.c
@@ -411,8 +411,8 @@
   hadamard_lp_8x8_sse2(src_diff, src_stride, coeff);
 }
 
-void aom_hadamard_8x8_dual_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
-                                int16_t *coeff) {
+void aom_hadamard_lp_8x8_dual_sse2(const int16_t *src_diff,
+                                   ptrdiff_t src_stride, int16_t *coeff) {
   for (int i = 0; i < 2; i++) {
     hadamard_lp_8x8_sse2(src_diff + (i * 8), src_stride, coeff + (i * 64));
   }
diff --git a/av1/encoder/nonrd_pickmode.c b/av1/encoder/nonrd_pickmode.c
index 768e888..1f13075 100644
--- a/av1/encoder/nonrd_pickmode.c
+++ b/av1/encoder/nonrd_pickmode.c
@@ -822,9 +822,11 @@
   rd_stats->dist = dist;
 }
 
-static INLINE void aom_process_hadamard_8x16(MACROBLOCK *x, int max_blocks_high,
-                                             int max_blocks_wide, int num_4x4_w,
-                                             int step, int block_step) {
+static INLINE void aom_process_hadamard_lp_8x16(MACROBLOCK *x,
+                                                int max_blocks_high,
+                                                int max_blocks_wide,
+                                                int num_4x4_w, int step,
+                                                int block_step) {
   struct macroblock_plane *const p = &x->plane[0];
   const int bw = 4 * num_4x4_w;
   const int num_4x4 = AOMMIN(num_4x4_w, max_blocks_wide);
@@ -834,7 +836,7 @@
     for (int c = 0; c < num_4x4; c += 2 * block_step) {
       const int16_t *src_diff = &p->src_diff[(r * bw + c) << 2];
       int16_t *low_coeff = (int16_t *)p->coeff + BLOCK_OFFSET(block);
-      aom_hadamard_8x8_dual(src_diff, (ptrdiff_t)bw, low_coeff);
+      aom_hadamard_lp_8x8_dual(src_diff, (ptrdiff_t)bw, low_coeff);
       block += 2 * step;
     }
   }
@@ -1016,8 +1018,8 @@
         (tx_size == TX_8X8 && block_size_wide[bsize] >= 16 &&
          block_size_high[bsize] >= 8);
     if (is_tx_8x8_dual_applicable) {
-      aom_process_hadamard_8x16(x, max_blocks_high, max_blocks_wide, num_4x4_w,
-                                step, block_step);
+      aom_process_hadamard_lp_8x16(x, max_blocks_high, max_blocks_wide,
+                                   num_4x4_w, step, block_step);
     }
 
     // Keep track of the row and column of the blocks we use so that we know
diff --git a/test/hadamard_test.cc b/test/hadamard_test.cc
index 0d02026..2b01cb8 100644
--- a/test/hadamard_test.cc
+++ b/test/hadamard_test.cc
@@ -385,24 +385,24 @@
 
 INSTANTIATE_TEST_SUITE_P(C, HadamardLowbdLP8x8DualTest,
                          ::testing::Values(HadamardLP8x8DualFuncWithSize(
-                             &aom_hadamard_8x8_dual_c, 8, 16)));
+                             &aom_hadamard_lp_8x8_dual_c, 8, 16)));
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_SUITE_P(SSE2, HadamardLowbdLP8x8DualTest,
                          ::testing::Values(HadamardLP8x8DualFuncWithSize(
-                             &aom_hadamard_8x8_dual_sse2, 8, 16)));
+                             &aom_hadamard_lp_8x8_dual_sse2, 8, 16)));
 #endif  // HAVE_SSE2
 
 #if HAVE_AVX2
 INSTANTIATE_TEST_SUITE_P(AVX2, HadamardLowbdLP8x8DualTest,
                          ::testing::Values(HadamardLP8x8DualFuncWithSize(
-                             &aom_hadamard_8x8_dual_avx2, 8, 16)));
+                             &aom_hadamard_lp_8x8_dual_avx2, 8, 16)));
 #endif  // HAVE_AVX2
 
 #if HAVE_NEON
 INSTANTIATE_TEST_SUITE_P(NEON, HadamardLowbdLP8x8DualTest,
                          ::testing::Values(HadamardLP8x8DualFuncWithSize(
-                             &aom_hadamard_8x8_dual_neon, 8, 16)));
+                             &aom_hadamard_lp_8x8_dual_neon, 8, 16)));
 #endif  // HAVE_NEON
 
 }  // namespace