Cleanup: remove const
Similar to previous commit, this cleanup removes more "const"s for
parameters passed by value
BUG=aomedia:448
Change-Id: I092bcbeecab75f0c14c3ee60d34dcf6f69034fe4
diff --git a/aom_dsp/simd/v128_intrinsics.h b/aom_dsp/simd/v128_intrinsics.h
index d5f9e35..8f65093 100644
--- a/aom_dsp/simd/v128_intrinsics.h
+++ b/aom_dsp/simd/v128_intrinsics.h
@@ -49,7 +49,7 @@
c_v128_store_aligned(p, a);
}
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
return c_v128_align(a, b, c);
}
@@ -231,37 +231,37 @@
return c_v128_shr_s32(a, c);
}
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
return c_v128_shr_n_byte(a, n);
}
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
return c_v128_shl_n_byte(a, n);
}
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int n) {
return c_v128_shl_n_8(a, n);
}
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int n) {
return c_v128_shl_n_16(a, n);
}
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int n) {
return c_v128_shl_n_32(a, n);
}
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int n) {
return c_v128_shr_n_u8(a, n);
}
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int n) {
return c_v128_shr_n_u16(a, n);
}
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int n) {
return c_v128_shr_n_u32(a, n);
}
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int n) {
return c_v128_shr_n_s8(a, n);
}
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int n) {
return c_v128_shr_n_s16(a, n);
}
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int n) {
return c_v128_shr_n_s32(a, n);
}
diff --git a/aom_dsp/simd/v128_intrinsics_arm.h b/aom_dsp/simd/v128_intrinsics_arm.h
index 369b84d..0377d4c 100644
--- a/aom_dsp/simd/v128_intrinsics_arm.h
+++ b/aom_dsp/simd/v128_intrinsics_arm.h
@@ -51,7 +51,7 @@
vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
// The following functions require an immediate.
// Some compilers will check this during optimisation, others wont.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
@@ -541,7 +541,7 @@
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
return n < 8
? v128_from_64(
(uint64_t)vorr_u64(
@@ -559,7 +559,7 @@
0));
}
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
return n < 8
? v128_from_64(
vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
@@ -574,45 +574,45 @@
(n - 8) * 8)));
}
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
}
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
}
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
}
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
}
#else
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
if (n < 8)
return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
v64_shr_n_byte(v128_low_v64(a), 8 - n)),
@@ -621,7 +621,7 @@
return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
}
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
if (n < 8)
return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
v64_or(v64_shr_n_byte(v128_low_v64(a), n),
@@ -630,39 +630,39 @@
return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
}
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
return v128_shl_8(a, c);
}
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
return v128_shr_u8(a, c);
}
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
return v128_shr_s8(a, c);
}
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
return v128_shl_16(a, c);
}
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
return v128_shr_u16(a, c);
}
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
return v128_shr_s16(a, c);
}
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
return v128_shl_32(a, c);
}
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
return v128_shr_u32(a, c);
}
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
return v128_shr_s32(a, c);
}
diff --git a/aom_dsp/simd/v128_intrinsics_c.h b/aom_dsp/simd/v128_intrinsics_c.h
index dd884b2..32e7c32 100644
--- a/aom_dsp/simd/v128_intrinsics_c.h
+++ b/aom_dsp/simd/v128_intrinsics_c.h
@@ -601,7 +601,7 @@
c_v64_cmpeq_16(a.v64[0], b.v64[0]));
}
-SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, unsigned int n) {
if (n < 8)
return c_v128_from_v64(c_v64_or(c_v64_shl_n_byte(a.v64[1], n),
c_v64_shr_n_byte(a.v64[0], 8 - n)),
@@ -610,7 +610,7 @@
return c_v128_from_v64(c_v64_shl_n_byte(a.v64[0], n - 8), c_v64_zero());
}
-SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, unsigned int n) {
if (n < 8)
return c_v128_from_v64(c_v64_shr_n_byte(a.v64[1], n),
c_v64_or(c_v64_shr_n_byte(a.v64[0], n),
@@ -619,7 +619,7 @@
return c_v128_from_v64(c_v64_zero(), c_v64_shr_n_byte(a.v64[1], n - 8));
}
-SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, unsigned int c) {
if (SIMD_CHECK && c > 15) {
fprintf(stderr, "Error: undefined alignment %d\n", c);
abort();
@@ -628,79 +628,79 @@
: b;
}
-SIMD_INLINE c_v128 c_v128_shl_8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_8(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shl_8(a.v64[1], c), c_v64_shl_8(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_u8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u8(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_u8(a.v64[1], c), c_v64_shr_u8(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_s8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s8(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_s8(a.v64[1], c), c_v64_shr_s8(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shl_16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_16(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shl_16(a.v64[1], c), c_v64_shl_16(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_u16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u16(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_u16(a.v64[1], c),
c_v64_shr_u16(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_s16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s16(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_s16(a.v64[1], c),
c_v64_shr_s16(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shl_32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_32(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shl_32(a.v64[1], c), c_v64_shl_32(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_u32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u32(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_u32(a.v64[1], c),
c_v64_shr_u32(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shr_s32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s32(c_v128 a, unsigned int c) {
return c_v128_from_v64(c_v64_shr_s32(a.v64[1], c),
c_v64_shr_s32(a.v64[0], c));
}
-SIMD_INLINE c_v128 c_v128_shl_n_8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_8(c_v128 a, unsigned int n) {
return c_v128_shl_8(a, n);
}
-SIMD_INLINE c_v128 c_v128_shl_n_16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_16(c_v128 a, unsigned int n) {
return c_v128_shl_16(a, n);
}
-SIMD_INLINE c_v128 c_v128_shl_n_32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_32(c_v128 a, unsigned int n) {
return c_v128_shl_32(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_u8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u8(c_v128 a, unsigned int n) {
return c_v128_shr_u8(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_u16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u16(c_v128 a, unsigned int n) {
return c_v128_shr_u16(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_u32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u32(c_v128 a, unsigned int n) {
return c_v128_shr_u32(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_s8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s8(c_v128 a, unsigned int n) {
return c_v128_shr_s8(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_s16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s16(c_v128 a, unsigned int n) {
return c_v128_shr_s16(a, n);
}
-SIMD_INLINE c_v128 c_v128_shr_n_s32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s32(c_v128 a, unsigned int n) {
return c_v128_shr_s32(a, n);
}
diff --git a/aom_dsp/simd/v128_intrinsics_x86.h b/aom_dsp/simd/v128_intrinsics_x86.h
index df8be27..cca1788 100644
--- a/aom_dsp/simd/v128_intrinsics_x86.h
+++ b/aom_dsp/simd/v128_intrinsics_x86.h
@@ -62,7 +62,7 @@
// Some compilers will check this during optimisation, others wont.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
#if defined(__SSSE3__)
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
return c ? _mm_alignr_epi8(a, b, c) : b;
}
#else
diff --git a/aom_dsp/simd/v256_intrinsics.h b/aom_dsp/simd/v256_intrinsics.h
index 85d6272..1896374 100644
--- a/aom_dsp/simd/v256_intrinsics.h
+++ b/aom_dsp/simd/v256_intrinsics.h
@@ -51,7 +51,7 @@
c_v256_store_aligned(p, a);
}
-SIMD_INLINE v256 v256_align(v256 a, v256 b, const unsigned int c) {
+SIMD_INLINE v256 v256_align(v256 a, v256 b, unsigned int c) {
return c_v256_align(a, b, c);
}
@@ -246,37 +246,37 @@
return c_v256_shr_s32(a, c);
}
-SIMD_INLINE v256 v256_shr_n_byte(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_byte(v256 a, unsigned int n) {
return c_v256_shr_n_byte(a, n);
}
-SIMD_INLINE v256 v256_shl_n_byte(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_byte(v256 a, unsigned int n) {
return c_v256_shl_n_byte(a, n);
}
-SIMD_INLINE v256 v256_shl_n_8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_8(v256 a, unsigned int n) {
return c_v256_shl_n_8(a, n);
}
-SIMD_INLINE v256 v256_shl_n_16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_16(v256 a, unsigned int n) {
return c_v256_shl_n_16(a, n);
}
-SIMD_INLINE v256 v256_shl_n_32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_32(v256 a, unsigned int n) {
return c_v256_shl_n_32(a, n);
}
-SIMD_INLINE v256 v256_shr_n_u8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u8(v256 a, unsigned int n) {
return c_v256_shr_n_u8(a, n);
}
-SIMD_INLINE v256 v256_shr_n_u16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u16(v256 a, unsigned int n) {
return c_v256_shr_n_u16(a, n);
}
-SIMD_INLINE v256 v256_shr_n_u32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u32(v256 a, unsigned int n) {
return c_v256_shr_n_u32(a, n);
}
-SIMD_INLINE v256 v256_shr_n_s8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s8(v256 a, unsigned int n) {
return c_v256_shr_n_s8(a, n);
}
-SIMD_INLINE v256 v256_shr_n_s16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s16(v256 a, unsigned int n) {
return c_v256_shr_n_s16(a, n);
}
-SIMD_INLINE v256 v256_shr_n_s32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s32(v256 a, unsigned int n) {
return c_v256_shr_n_s32(a, n);
}
diff --git a/aom_dsp/simd/v256_intrinsics_c.h b/aom_dsp/simd/v256_intrinsics_c.h
index 2ae5712..f96ca7f 100644
--- a/aom_dsp/simd/v256_intrinsics_c.h
+++ b/aom_dsp/simd/v256_intrinsics_c.h
@@ -607,7 +607,7 @@
c_v128_cmpeq_16(a.v128[0], b.v128[0]));
}
-SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, unsigned int n) {
if (n < 16)
return c_v256_from_v128(c_v128_or(c_v128_shl_n_byte(a.v128[1], n),
c_v128_shr_n_byte(a.v128[0], 16 - n)),
@@ -619,7 +619,7 @@
return c_v256_from_v128(c_v256_low_v128(a), c_v128_zero());
}
-SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, unsigned int n) {
if (n < 16)
return c_v256_from_v128(c_v128_shr_n_byte(a.v128[1], n),
c_v128_or(c_v128_shr_n_byte(a.v128[0], n),
@@ -631,7 +631,7 @@
return c_v256_from_v128(c_v128_zero(), c_v256_high_v128(a));
}
-SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, unsigned int c) {
if (SIMD_CHECK && c > 31) {
fprintf(stderr, "Error: undefined alignment %d\n", c);
abort();
@@ -640,84 +640,84 @@
: b;
}
-SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shl_8(a.v128[1], c),
c_v128_shl_8(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_u8(a.v128[1], c),
c_v128_shr_u8(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_s8(a.v128[1], c),
c_v128_shr_s8(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shl_16(a.v128[1], c),
c_v128_shl_16(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_u16(a.v128[1], c),
c_v128_shr_u16(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_s16(a.v128[1], c),
c_v128_shr_s16(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shl_32(a.v128[1], c),
c_v128_shl_32(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_u32(a.v128[1], c),
c_v128_shr_u32(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, unsigned int c) {
return c_v256_from_v128(c_v128_shr_s32(a.v128[1], c),
c_v128_shr_s32(a.v128[0], c));
}
-SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, unsigned int n) {
return c_v256_shl_8(a, n);
}
-SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, unsigned int n) {
return c_v256_shl_16(a, n);
}
-SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, unsigned int n) {
return c_v256_shl_32(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, unsigned int n) {
return c_v256_shr_u8(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, unsigned int n) {
return c_v256_shr_u16(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, unsigned int n) {
return c_v256_shr_u32(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, unsigned int n) {
return c_v256_shr_s8(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, unsigned int n) {
return c_v256_shr_s16(a, n);
}
-SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, unsigned int n) {
return c_v256_shr_s32(a, n);
}
diff --git a/aom_dsp/simd/v256_intrinsics_v128.h b/aom_dsp/simd/v256_intrinsics_v128.h
index e5a7449..a4b334e 100644
--- a/aom_dsp/simd/v256_intrinsics_v128.h
+++ b/aom_dsp/simd/v256_intrinsics_v128.h
@@ -468,39 +468,39 @@
return v256_from_v128(v128_cmpeq_16(a.hi, b.hi), v128_cmpeq_16(a.lo, b.lo));
}
-SIMD_INLINE v256 v256_shl_8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_8(v256 a, unsigned int c) {
return v256_from_v128(v128_shl_8(a.hi, c), v128_shl_8(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_u8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u8(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_u8(a.hi, c), v128_shr_u8(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_s8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s8(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_s8(a.hi, c), v128_shr_s8(a.lo, c));
}
-SIMD_INLINE v256 v256_shl_16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_16(v256 a, unsigned int c) {
return v256_from_v128(v128_shl_16(a.hi, c), v128_shl_16(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_u16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u16(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_u16(a.hi, c), v128_shr_u16(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_s16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s16(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_s16(a.hi, c), v128_shr_s16(a.lo, c));
}
-SIMD_INLINE v256 v256_shl_32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_32(v256 a, unsigned int c) {
return v256_from_v128(v128_shl_32(a.hi, c), v128_shl_32(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_u32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u32(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_u32(a.hi, c), v128_shr_u32(a.lo, c));
}
-SIMD_INLINE v256 v256_shr_s32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) {
return v256_from_v128(v128_shr_s32(a.hi, c), v128_shr_s32(a.lo, c));
}
diff --git a/aom_dsp/simd/v64_intrinsics.h b/aom_dsp/simd/v64_intrinsics.h
index 34c331e..ee2b683 100644
--- a/aom_dsp/simd/v64_intrinsics.h
+++ b/aom_dsp/simd/v64_intrinsics.h
@@ -60,9 +60,9 @@
c_v64_store_aligned(p, a);
}
-SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
- return c_v64_align(a, b, c);
-}
+SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
+ return c_v64_align(a, b, c);
+}
SIMD_INLINE v64 v64_zero() { return c_v64_zero(); }
SIMD_INLINE v64 v64_dup_8(uint8_t x) { return c_v64_dup_8(x); }
@@ -188,37 +186,37 @@
SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int n) {
return c_v64_shr_s32(a, n);
}
-SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int n) {
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int n) {
return c_v64_shr_n_byte(a, n);
}
-SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int n) {
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int n) {
return c_v64_shl_n_byte(a, n);
}
-SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
return c_v64_shl_n_8(a, c);
}
-SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
return c_v64_shr_n_u8(a, c);
}
-SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
return c_v64_shr_n_s8(a, c);
}
-SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
return c_v64_shl_n_16(a, c);
}
-SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
return c_v64_shr_n_u16(a, c);
}
-SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
return c_v64_shr_n_s16(a, c);
}
-SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
return c_v64_shl_n_32(a, c);
}
-SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
return c_v64_shr_n_u32(a, c);
}
-SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
return c_v64_shr_n_s32(a, c);
}
diff --git a/aom_dsp/simd/v64_intrinsics_arm.h b/aom_dsp/simd/v64_intrinsics_arm.h
index ff00d98..c7574ee 100644
--- a/aom_dsp/simd/v64_intrinsics_arm.h
+++ b/aom_dsp/simd/v64_intrinsics_arm.h
@@ -95,7 +95,7 @@
// The following function requires an immediate.
// Some compilers will check this if it's optimising, others wont.
-SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
+SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
return c ? vreinterpret_s64_s8(
vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
@@ -498,93 +498,83 @@
// Some compilers will check this during optimisation, others wont.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
-SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
return vshl_n_s64(a, c * 8);
}
-SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
return c ? (v64)vshr_n_u64(vreinterpret_u64_s64(a), c * 8) : a;
}
-SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
return vreinterpret_s64_u8(vshl_n_u8(vreinterpret_u8_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
return vreinterpret_s64_u8(vshr_n_u8(vreinterpret_u8_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
return vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c));
}
-SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
return vreinterpret_s64_u16(vshl_n_u16(vreinterpret_u16_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
return vreinterpret_s64_u16(vshr_n_u16(vreinterpret_u16_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
return vreinterpret_s64_s16(vshr_n_s16(vreinterpret_s16_s64(a), c));
}
-SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
return vreinterpret_s64_u32(vshl_n_u32(vreinterpret_u32_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
return vreinterpret_s64_u32(vshr_n_u32(vreinterpret_u32_s64(a), c));
}
-SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
return vreinterpret_s64_s32(vshr_n_s32(vreinterpret_s32_s64(a), c));
}
#else
-SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
return v64_from_64(v64_u64(a) << c * 8);
}
-SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
return v64_from_64(v64_u64(a) >> c * 8);
}
-SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
- return v64_shl_8(a, c);
-}
+SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) { return v64_shl_8(a, c); }
-SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
- return v64_shr_u8(a, c);
-}
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) { return v64_shr_u8(a, c); }
-SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
- return v64_shr_s8(a, c);
-}
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) { return v64_shr_s8(a, c); }
-SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
- return v64_shl_16(a, c);
-}
+SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) { return v64_shl_16(a, c); }
-SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
return v64_shr_u16(a, c);
}
-SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
return v64_shr_s16(a, c);
}
-SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
- return v64_shl_32(a, c);
-}
+SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) { return v64_shl_32(a, c); }
-SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
return v64_shr_u32(a, c);
}
-SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
return v64_shr_s32(a, c);
}
diff --git a/aom_dsp/simd/v64_intrinsics_c.h b/aom_dsp/simd/v64_intrinsics_c.h
index 3633a02..5032238 100644
--- a/aom_dsp/simd/v64_intrinsics_c.h
+++ b/aom_dsp/simd/v64_intrinsics_c.h
@@ -860,19 +860,19 @@
return t;
}
-SIMD_INLINE c_v64 c_v64_shr_n_byte(c_v64 x, const unsigned int i) {
+SIMD_INLINE c_v64 c_v64_shr_n_byte(c_v64 x, unsigned int i) {
c_v64 t;
t.u64 = x.u64 >> i * 8;
return t;
}
-SIMD_INLINE c_v64 c_v64_shl_n_byte(c_v64 x, const unsigned int i) {
+SIMD_INLINE c_v64 c_v64_shl_n_byte(c_v64 x, unsigned int i) {
c_v64 t;
t.u64 = x.u64 << i * 8;
return t;
}
-SIMD_INLINE c_v64 c_v64_align(c_v64 a, c_v64 b, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_align(c_v64 a, c_v64 b, unsigned int c) {
if (SIMD_CHECK && c > 7) {
fprintf(stderr, "Error: undefined alignment %d\n", c);
abort();
@@ -880,39 +880,39 @@
return c ? c_v64_or(c_v64_shr_n_byte(b, c), c_v64_shl_n_byte(a, 8 - c)) : b;
}
-SIMD_INLINE c_v64 c_v64_shl_n_8(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shl_n_8(c_v64 a, unsigned int c) {
return c_v64_shl_8(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_u8(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_u8(c_v64 a, unsigned int c) {
return c_v64_shr_u8(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_s8(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_s8(c_v64 a, unsigned int c) {
return c_v64_shr_s8(a, c);
}
-SIMD_INLINE c_v64 c_v64_shl_n_16(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shl_n_16(c_v64 a, unsigned int c) {
return c_v64_shl_16(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_u16(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_u16(c_v64 a, unsigned int c) {
return c_v64_shr_u16(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_s16(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_s16(c_v64 a, unsigned int c) {
return c_v64_shr_s16(a, c);
}
-SIMD_INLINE c_v64 c_v64_shl_n_32(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shl_n_32(c_v64 a, unsigned int c) {
return c_v64_shl_32(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_u32(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_u32(c_v64 a, unsigned int c) {
return c_v64_shr_u32(a, c);
}
-SIMD_INLINE c_v64 c_v64_shr_n_s32(c_v64 a, const unsigned int c) {
+SIMD_INLINE c_v64 c_v64_shr_n_s32(c_v64 a, unsigned int c) {
return c_v64_shr_s32(a, c);
}
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index d7e7779..760b469 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -93,7 +93,7 @@
#define AV1_FILTER_SHIFT 7
uint8_t clip_pixel(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }
-void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride,
+void filter_block2d_8_c(const uint8_t *src_ptr, unsigned int src_stride,
const int16_t *HFilter, const int16_t *VFilter,
uint8_t *dst_ptr, unsigned int dst_stride,
unsigned int output_width, unsigned int output_height) {
@@ -278,10 +278,9 @@
}
void highbd_filter_average_block2d_8_c(
- const uint16_t *src_ptr, const unsigned int src_stride,
- const int16_t *HFilter, const int16_t *VFilter, uint16_t *dst_ptr,
- unsigned int dst_stride, unsigned int output_width,
- unsigned int output_height, int bd) {
+ const uint16_t *src_ptr, unsigned int src_stride, const int16_t *HFilter,
+ const int16_t *VFilter, uint16_t *dst_ptr, unsigned int dst_stride,
+ unsigned int output_width, unsigned int output_height, int bd) {
uint16_t tmp[kMaxDimension * kMaxDimension];
assert(output_width <= kMaxDimension);
@@ -474,10 +473,9 @@
}
void wrapper_filter_average_block2d_8_c(
- const uint8_t *src_ptr, const unsigned int src_stride,
- const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
- unsigned int dst_stride, unsigned int output_width,
- unsigned int output_height) {
+ const uint8_t *src_ptr, unsigned int src_stride, const int16_t *HFilter,
+ const int16_t *VFilter, uint8_t *dst_ptr, unsigned int dst_stride,
+ unsigned int output_width, unsigned int output_height) {
#if CONFIG_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
@@ -494,13 +492,10 @@
#endif
}
- void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
- const unsigned int src_stride,
- const int16_t *HFilter,
- const int16_t *VFilter, uint8_t *dst_ptr,
- unsigned int dst_stride,
- unsigned int output_width,
- unsigned int output_height) {
+ void wrapper_filter_block2d_8_c(
+ const uint8_t *src_ptr, unsigned int src_stride, const int16_t *HFilter,
+ const int16_t *VFilter, uint8_t *dst_ptr, unsigned int dst_stride,
+ unsigned int output_width, unsigned int output_height) {
#if CONFIG_HIGHBITDEPTH
if (UUT_->use_highbd_ == 0) {
filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 9932e99..994b301 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -24,12 +24,12 @@
namespace {
#if WRITE_COMPRESSED_STREAM
-static void mem_put_le16(char *const mem, const unsigned int val) {
+static void mem_put_le16(char *const mem, unsigned int val) {
mem[0] = val;
mem[1] = val >> 8;
}
-static void mem_put_le32(char *const mem, const unsigned int val) {
+static void mem_put_le32(char *const mem, unsigned int val) {
mem[0] = val;
mem[1] = val >> 8;
mem[2] = val >> 16;