Prefix cfl functions with cfl_

BUG=aomedia:1540

Change-Id: I53fac5abf5556f574668724b05d83a675a9e56d8
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 3057558..7f2ae9e 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -408,8 +408,8 @@ specialize qw/av1_upsample_intra_edge_high sse4_1/; # CFL -add_proto qw/cfl_subtract_average_fn get_subtract_average_fn/, "TX_SIZE tx_size"; -specialize qw/get_subtract_average_fn sse2 avx2 neon vsx/; +add_proto qw/cfl_subtract_average_fn cfl_get_subtract_average_fn/, "TX_SIZE tx_size"; +specialize qw/cfl_get_subtract_average_fn sse2 avx2 neon vsx/; add_proto qw/cfl_subsample_lbd_fn cfl_get_luma_subsampling_420_lbd/, "TX_SIZE tx_size"; specialize qw/cfl_get_luma_subsampling_420_lbd ssse3 avx2 neon/; @@ -429,10 +429,10 @@ add_proto qw/cfl_subsample_hbd_fn cfl_get_luma_subsampling_444_hbd/, "TX_SIZE tx_size"; specialize qw/cfl_get_luma_subsampling_444_hbd ssse3 avx2 neon/; -add_proto qw/cfl_predict_lbd_fn get_predict_lbd_fn/, "TX_SIZE tx_size"; -specialize qw/get_predict_lbd_fn ssse3 avx2 neon/; +add_proto qw/cfl_predict_lbd_fn cfl_get_predict_lbd_fn/, "TX_SIZE tx_size"; +specialize qw/cfl_get_predict_lbd_fn ssse3 avx2 neon/; -add_proto qw/cfl_predict_hbd_fn get_predict_hbd_fn/, "TX_SIZE tx_size"; -specialize qw/get_predict_hbd_fn ssse3 avx2 neon/; +add_proto qw/cfl_predict_hbd_fn cfl_get_predict_hbd_fn/, "TX_SIZE tx_size"; +specialize qw/cfl_get_predict_hbd_fn ssse3 avx2 neon/; 1;
diff --git a/av1/common/cfl.c b/av1/common/cfl.c
index 65e18e8..f65e002 100644
--- a/av1/common/cfl.c
+++ b/av1/common/cfl.c
@@ -180,7 +180,7 @@ assert(cfl->are_parameters_computed == 0); cfl_pad(cfl, tx_size_wide[tx_size], tx_size_high[tx_size]); - get_subtract_average_fn(tx_size)(cfl->recon_buf_q3, cfl->ac_buf_q3); + cfl_get_subtract_average_fn(tx_size)(cfl->recon_buf_q3, cfl->ac_buf_q3); cfl->are_parameters_computed = 1; } @@ -198,11 +198,11 @@ CFL_BUF_SQUARE); if (is_cur_buf_hbd(xd)) { uint16_t *dst_16 = CONVERT_TO_SHORTPTR(dst); - get_predict_hbd_fn(tx_size)(cfl->ac_buf_q3, dst_16, dst_stride, alpha_q3, - xd->bd); + cfl_get_predict_hbd_fn(tx_size)(cfl->ac_buf_q3, dst_16, dst_stride, + alpha_q3, xd->bd); return; } - get_predict_lbd_fn(tx_size)(cfl->ac_buf_q3, dst, dst_stride, alpha_q3); + cfl_get_predict_lbd_fn(tx_size)(cfl->ac_buf_q3, dst, dst_stride, alpha_q3); } static void cfl_luma_subsampling_420_lbd_c(const uint8_t *input,
diff --git a/av1/common/cfl.h b/av1/common/cfl.h
index 3b91d85..052bf88 100644
--- a/av1/common/cfl.h
+++ b/av1/common/cfl.h
@@ -164,46 +164,47 @@ } // Declare size-specific wrappers for all valid CfL sizes. -#define CFL_SUB_AVG_FN(arch) \ - CFL_SUB_AVG_X(arch, 4, 4, 8, 4) \ - CFL_SUB_AVG_X(arch, 4, 8, 16, 5) \ - CFL_SUB_AVG_X(arch, 4, 16, 32, 6) \ - CFL_SUB_AVG_X(arch, 8, 4, 16, 5) \ - CFL_SUB_AVG_X(arch, 8, 8, 32, 6) \ - CFL_SUB_AVG_X(arch, 8, 16, 64, 7) \ - CFL_SUB_AVG_X(arch, 8, 32, 128, 8) \ - CFL_SUB_AVG_X(arch, 16, 4, 32, 6) \ - CFL_SUB_AVG_X(arch, 16, 8, 64, 7) \ - CFL_SUB_AVG_X(arch, 16, 16, 128, 8) \ - CFL_SUB_AVG_X(arch, 16, 32, 256, 9) \ - CFL_SUB_AVG_X(arch, 32, 8, 128, 8) \ - CFL_SUB_AVG_X(arch, 32, 16, 256, 9) \ - CFL_SUB_AVG_X(arch, 32, 32, 512, 10) \ - cfl_subtract_average_fn get_subtract_average_fn_##arch(TX_SIZE tx_size) { \ - static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = { \ - subtract_average_4x4_##arch, /* 4x4 */ \ - subtract_average_8x8_##arch, /* 8x8 */ \ - subtract_average_16x16_##arch, /* 16x16 */ \ - subtract_average_32x32_##arch, /* 32x32 */ \ - NULL, /* 64x64 (invalid CFL size) */ \ - subtract_average_4x8_##arch, /* 4x8 */ \ - subtract_average_8x4_##arch, /* 8x4 */ \ - subtract_average_8x16_##arch, /* 8x16 */ \ - subtract_average_16x8_##arch, /* 16x8 */ \ - subtract_average_16x32_##arch, /* 16x32 */ \ - subtract_average_32x16_##arch, /* 32x16 */ \ - NULL, /* 32x64 (invalid CFL size) */ \ - NULL, /* 64x32 (invalid CFL size) */ \ - subtract_average_4x16_##arch, /* 4x16 (invalid CFL size) */ \ - subtract_average_16x4_##arch, /* 16x4 (invalid CFL size) */ \ - subtract_average_8x32_##arch, /* 8x32 (invalid CFL size) */ \ - subtract_average_32x8_##arch, /* 32x8 (invalid CFL size) */ \ - NULL, /* 16x64 (invalid CFL size) */ \ - NULL, /* 64x16 (invalid CFL size) */ \ - }; \ - /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */ \ - /* index the function pointer array out of bounds. 
*/ \ - return sub_avg[tx_size % TX_SIZES_ALL]; \ +#define CFL_SUB_AVG_FN(arch) \ + CFL_SUB_AVG_X(arch, 4, 4, 8, 4) \ + CFL_SUB_AVG_X(arch, 4, 8, 16, 5) \ + CFL_SUB_AVG_X(arch, 4, 16, 32, 6) \ + CFL_SUB_AVG_X(arch, 8, 4, 16, 5) \ + CFL_SUB_AVG_X(arch, 8, 8, 32, 6) \ + CFL_SUB_AVG_X(arch, 8, 16, 64, 7) \ + CFL_SUB_AVG_X(arch, 8, 32, 128, 8) \ + CFL_SUB_AVG_X(arch, 16, 4, 32, 6) \ + CFL_SUB_AVG_X(arch, 16, 8, 64, 7) \ + CFL_SUB_AVG_X(arch, 16, 16, 128, 8) \ + CFL_SUB_AVG_X(arch, 16, 32, 256, 9) \ + CFL_SUB_AVG_X(arch, 32, 8, 128, 8) \ + CFL_SUB_AVG_X(arch, 32, 16, 256, 9) \ + CFL_SUB_AVG_X(arch, 32, 32, 512, 10) \ + cfl_subtract_average_fn cfl_get_subtract_average_fn_##arch( \ + TX_SIZE tx_size) { \ + static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = { \ + subtract_average_4x4_##arch, /* 4x4 */ \ + subtract_average_8x8_##arch, /* 8x8 */ \ + subtract_average_16x16_##arch, /* 16x16 */ \ + subtract_average_32x32_##arch, /* 32x32 */ \ + NULL, /* 64x64 (invalid CFL size) */ \ + subtract_average_4x8_##arch, /* 4x8 */ \ + subtract_average_8x4_##arch, /* 8x4 */ \ + subtract_average_8x16_##arch, /* 8x16 */ \ + subtract_average_16x8_##arch, /* 16x8 */ \ + subtract_average_16x32_##arch, /* 16x32 */ \ + subtract_average_32x16_##arch, /* 32x16 */ \ + NULL, /* 32x64 (invalid CFL size) */ \ + NULL, /* 64x32 (invalid CFL size) */ \ + subtract_average_4x16_##arch, /* 4x16 (invalid CFL size) */ \ + subtract_average_16x4_##arch, /* 16x4 (invalid CFL size) */ \ + subtract_average_8x32_##arch, /* 8x32 (invalid CFL size) */ \ + subtract_average_32x8_##arch, /* 32x8 (invalid CFL size) */ \ + NULL, /* 16x64 (invalid CFL size) */ \ + NULL, /* 64x16 (invalid CFL size) */ \ + }; \ + /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */ \ + /* index the function pointer array out of bounds. 
*/ \ + return sub_avg[tx_size % TX_SIZES_ALL]; \ } // For VSX SIMD optimization, the C versions of width == 4 subtract are @@ -233,46 +234,46 @@ #define CFL_PREDICT_X(arch, width, height, bd) \ CFL_PREDICT_##bd(arch, width, height) -#define CFL_PREDICT_FN(arch, bd) \ - CFL_PREDICT_X(arch, 4, 4, bd) \ - CFL_PREDICT_X(arch, 4, 8, bd) \ - CFL_PREDICT_X(arch, 4, 16, bd) \ - CFL_PREDICT_X(arch, 8, 4, bd) \ - CFL_PREDICT_X(arch, 8, 8, bd) \ - CFL_PREDICT_X(arch, 8, 16, bd) \ - CFL_PREDICT_X(arch, 8, 32, bd) \ - CFL_PREDICT_X(arch, 16, 4, bd) \ - CFL_PREDICT_X(arch, 16, 8, bd) \ - CFL_PREDICT_X(arch, 16, 16, bd) \ - CFL_PREDICT_X(arch, 16, 32, bd) \ - CFL_PREDICT_X(arch, 32, 8, bd) \ - CFL_PREDICT_X(arch, 32, 16, bd) \ - CFL_PREDICT_X(arch, 32, 32, bd) \ - cfl_predict_##bd##_fn get_predict_##bd##_fn_##arch(TX_SIZE tx_size) { \ - static const cfl_predict_##bd##_fn pred[TX_SIZES_ALL] = { \ - predict_##bd##_4x4_##arch, /* 4x4 */ \ - predict_##bd##_8x8_##arch, /* 8x8 */ \ - predict_##bd##_16x16_##arch, /* 16x16 */ \ - predict_##bd##_32x32_##arch, /* 32x32 */ \ - NULL, /* 64x64 (invalid CFL size) */ \ - predict_##bd##_4x8_##arch, /* 4x8 */ \ - predict_##bd##_8x4_##arch, /* 8x4 */ \ - predict_##bd##_8x16_##arch, /* 8x16 */ \ - predict_##bd##_16x8_##arch, /* 16x8 */ \ - predict_##bd##_16x32_##arch, /* 16x32 */ \ - predict_##bd##_32x16_##arch, /* 32x16 */ \ - NULL, /* 32x64 (invalid CFL size) */ \ - NULL, /* 64x32 (invalid CFL size) */ \ - predict_##bd##_4x16_##arch, /* 4x16 */ \ - predict_##bd##_16x4_##arch, /* 16x4 */ \ - predict_##bd##_8x32_##arch, /* 8x32 */ \ - predict_##bd##_32x8_##arch, /* 32x8 */ \ - NULL, /* 16x64 (invalid CFL size) */ \ - NULL, /* 64x16 (invalid CFL size) */ \ - }; \ - /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */ \ - /* index the function pointer array out of bounds. 
*/ \ - return pred[tx_size % TX_SIZES_ALL]; \ +#define CFL_PREDICT_FN(arch, bd) \ + CFL_PREDICT_X(arch, 4, 4, bd) \ + CFL_PREDICT_X(arch, 4, 8, bd) \ + CFL_PREDICT_X(arch, 4, 16, bd) \ + CFL_PREDICT_X(arch, 8, 4, bd) \ + CFL_PREDICT_X(arch, 8, 8, bd) \ + CFL_PREDICT_X(arch, 8, 16, bd) \ + CFL_PREDICT_X(arch, 8, 32, bd) \ + CFL_PREDICT_X(arch, 16, 4, bd) \ + CFL_PREDICT_X(arch, 16, 8, bd) \ + CFL_PREDICT_X(arch, 16, 16, bd) \ + CFL_PREDICT_X(arch, 16, 32, bd) \ + CFL_PREDICT_X(arch, 32, 8, bd) \ + CFL_PREDICT_X(arch, 32, 16, bd) \ + CFL_PREDICT_X(arch, 32, 32, bd) \ + cfl_predict_##bd##_fn cfl_get_predict_##bd##_fn_##arch(TX_SIZE tx_size) { \ + static const cfl_predict_##bd##_fn pred[TX_SIZES_ALL] = { \ + predict_##bd##_4x4_##arch, /* 4x4 */ \ + predict_##bd##_8x8_##arch, /* 8x8 */ \ + predict_##bd##_16x16_##arch, /* 16x16 */ \ + predict_##bd##_32x32_##arch, /* 32x32 */ \ + NULL, /* 64x64 (invalid CFL size) */ \ + predict_##bd##_4x8_##arch, /* 4x8 */ \ + predict_##bd##_8x4_##arch, /* 8x4 */ \ + predict_##bd##_8x16_##arch, /* 8x16 */ \ + predict_##bd##_16x8_##arch, /* 16x8 */ \ + predict_##bd##_16x32_##arch, /* 16x32 */ \ + predict_##bd##_32x16_##arch, /* 32x16 */ \ + NULL, /* 32x64 (invalid CFL size) */ \ + NULL, /* 64x32 (invalid CFL size) */ \ + predict_##bd##_4x16_##arch, /* 4x16 */ \ + predict_##bd##_16x4_##arch, /* 16x4 */ \ + predict_##bd##_8x32_##arch, /* 8x32 */ \ + predict_##bd##_32x8_##arch, /* 32x8 */ \ + NULL, /* 16x64 (invalid CFL size) */ \ + NULL, /* 64x16 (invalid CFL size) */ \ + }; \ + /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */ \ + /* index the function pointer array out of bounds. */ \ + return pred[tx_size % TX_SIZES_ALL]; \ } #endif // AOM_AV1_COMMON_CFL_H_
diff --git a/av1/common/ppc/cfl_ppc.c b/av1/common/ppc/cfl_ppc.c
index 61d8dc1..dca860b 100644
--- a/av1/common/ppc/cfl_ppc.c
+++ b/av1/common/ppc/cfl_ppc.c
@@ -124,7 +124,7 @@ // Based on observation, for small blocks VSX does not outperform C (no 64bit // load and store intrinsics). So we call the C code for block widths 4. -cfl_subtract_average_fn get_subtract_average_fn_vsx(TX_SIZE tx_size) { +cfl_subtract_average_fn cfl_get_subtract_average_fn_vsx(TX_SIZE tx_size) { static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = { subtract_average_4x4_c, /* 4x4 */ subtract_average_8x8_vsx, /* 8x8 */
diff --git a/av1/common/x86/cfl_avx2.c b/av1/common/x86/cfl_avx2.c
index d9bdf60..3d183b4 100644
--- a/av1/common/x86/cfl_avx2.c
+++ b/av1/common/x86/cfl_avx2.c
@@ -273,7 +273,7 @@ CFL_PREDICT_X(avx2, 32, 16, lbd); CFL_PREDICT_X(avx2, 32, 32, lbd); -cfl_predict_lbd_fn get_predict_lbd_fn_avx2(TX_SIZE tx_size) { +cfl_predict_lbd_fn cfl_get_predict_lbd_fn_avx2(TX_SIZE tx_size) { static const cfl_predict_lbd_fn pred[TX_SIZES_ALL] = { predict_lbd_4x4_ssse3, /* 4x4 */ predict_lbd_8x8_ssse3, /* 8x8 */ @@ -346,7 +346,7 @@ CFL_PREDICT_X(avx2, 32, 16, hbd) CFL_PREDICT_X(avx2, 32, 32, hbd) -cfl_predict_hbd_fn get_predict_hbd_fn_avx2(TX_SIZE tx_size) { +cfl_predict_hbd_fn cfl_get_predict_hbd_fn_avx2(TX_SIZE tx_size) { static const cfl_predict_hbd_fn pred[TX_SIZES_ALL] = { predict_hbd_4x4_ssse3, /* 4x4 */ predict_hbd_8x8_ssse3, /* 8x8 */ @@ -463,7 +463,7 @@ // Based on the observation that for small blocks AVX2 does not outperform // SSE2, we call the SSE2 code for block widths 4 and 8. -cfl_subtract_average_fn get_subtract_average_fn_avx2(TX_SIZE tx_size) { +cfl_subtract_average_fn cfl_get_subtract_average_fn_avx2(TX_SIZE tx_size) { static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = { subtract_average_4x4_sse2, /* 4x4 */ subtract_average_8x8_sse2, /* 8x8 */
diff --git a/test/cfl_test.cc b/test/cfl_test.cc
index 9515b67..fccc9f2 100644
--- a/test/cfl_test.cc
+++ b/test/cfl_test.cc
@@ -173,7 +173,7 @@ virtual void SetUp() { CFLTest::init(::testing::get<0>(this->GetParam())); sub_avg = ::testing::get<1>(this->GetParam())(tx_size); - sub_avg_ref = get_subtract_average_fn_c(tx_size); + sub_avg_ref = cfl_get_subtract_average_fn_c(tx_size); } virtual ~CFLSubAvgTest() {} @@ -360,7 +360,7 @@ virtual void SetUp() { CFLTest::init(::testing::get<0>(this->GetParam())); predict = ::testing::get<1>(this->GetParam())(tx_size); - predict_ref = get_predict_lbd_fn_c(tx_size); + predict_ref = cfl_get_predict_lbd_fn_c(tx_size); } virtual ~CFLPredictTest() {} @@ -406,7 +406,7 @@ virtual void SetUp() { CFLTest::init(::testing::get<0>(this->GetParam())); predict = ::testing::get<1>(this->GetParam())(tx_size); - predict_ref = get_predict_hbd_fn_c(tx_size); + predict_ref = cfl_get_predict_hbd_fn_c(tx_size); } virtual ~CFLPredictHBDTest() {} @@ -448,7 +448,7 @@ #if HAVE_SSE2 const sub_avg_param sub_avg_sizes_sse2[] = { ALL_CFL_TX_SIZES( - get_subtract_average_fn_sse2) }; + cfl_get_subtract_average_fn_sse2) }; INSTANTIATE_TEST_CASE_P(SSE2, CFLSubAvgTest, ::testing::ValuesIn(sub_avg_sizes_sse2)); @@ -469,10 +469,10 @@ }; const predict_param predict_sizes_ssse3[] = { ALL_CFL_TX_SIZES( - get_predict_lbd_fn_ssse3) }; + cfl_get_predict_lbd_fn_ssse3) }; const predict_param_hbd predict_sizes_hbd_ssse3[] = { ALL_CFL_TX_SIZES( - get_predict_hbd_fn_ssse3) }; + cfl_get_predict_hbd_fn_ssse3) }; INSTANTIATE_TEST_CASE_P(SSSE3, CFLSubsampleLBDTest, ::testing::ValuesIn(subsample_lbd_sizes_ssse3)); @@ -489,7 +489,7 @@ #if HAVE_AVX2 const sub_avg_param sub_avg_sizes_avx2[] = { ALL_CFL_TX_SIZES( - get_subtract_average_fn_avx2) }; + cfl_get_subtract_average_fn_avx2) }; const subsample_lbd_param subsample_lbd_sizes_avx2[] = { ALL_CFL_TX_SIZES_SUBSAMPLE(cfl_get_luma_subsampling_420_lbd_avx2, @@ -504,10 +504,10 @@ }; const predict_param predict_sizes_avx2[] = { ALL_CFL_TX_SIZES( - get_predict_lbd_fn_avx2) }; + cfl_get_predict_lbd_fn_avx2) }; const predict_param_hbd predict_sizes_hbd_avx2[] 
= { ALL_CFL_TX_SIZES( - get_predict_hbd_fn_avx2) }; + cfl_get_predict_hbd_fn_avx2) }; INSTANTIATE_TEST_CASE_P(AVX2, CFLSubAvgTest, ::testing::ValuesIn(sub_avg_sizes_avx2)); @@ -528,7 +528,7 @@ #if HAVE_NEON const sub_avg_param sub_avg_sizes_neon[] = { ALL_CFL_TX_SIZES( - get_subtract_average_fn_neon) }; + cfl_get_subtract_average_fn_neon) }; const subsample_lbd_param subsample_lbd_sizes_neon[] = { ALL_CFL_TX_SIZES_SUBSAMPLE(cfl_get_luma_subsampling_420_lbd_neon, @@ -543,10 +543,10 @@ }; const predict_param predict_sizes_neon[] = { ALL_CFL_TX_SIZES( - get_predict_lbd_fn_neon) }; + cfl_get_predict_lbd_fn_neon) }; const predict_param_hbd predict_sizes_hbd_neon[] = { ALL_CFL_TX_SIZES( - get_predict_hbd_fn_neon) }; + cfl_get_predict_hbd_fn_neon) }; INSTANTIATE_TEST_CASE_P(NEON, CFLSubAvgTest, ::testing::ValuesIn(sub_avg_sizes_neon)); @@ -566,7 +566,7 @@ #if HAVE_VSX const sub_avg_param sub_avg_sizes_vsx[] = { ALL_CFL_TX_SIZES( - get_subtract_average_fn_vsx) }; + cfl_get_subtract_average_fn_vsx) }; INSTANTIATE_TEST_CASE_P(VSX, CFLSubAvgTest, ::testing::ValuesIn(sub_avg_sizes_vsx));