Fix MSVS build issues

Give internal linkage to the file-local lookup tables and helper
functions in the temporal filter SIMD code. Without static, the SSE2
and AVX2 translation units both export xx_load_and_pad and
xx_mask_and_hadd with conflicting signatures, so the MSVS build fails
on the clashing external symbols.
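
For illustration, these are the two clashing pre-patch definitions of
xx_load_and_pad, with signatures taken verbatim from the hunks below:

    /* temporal_filter_sse2.c */
    void xx_load_and_pad(uint16_t *src, __m128i *dstvec, int col,
                         int block_width);

    /* temporal_filter_avx2.c */
    AOM_FORCE_INLINE __m256i xx_load_and_pad(uint16_t *src, int col,
                                             int block_width);

Declaring each of them static confines the definition to its own file.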

Change-Id: Ifaf408e458c7201129222251472f282808819420
diff --git a/av1/encoder/x86/temporal_filter_avx2.c b/av1/encoder/x86/temporal_filter_avx2.c
index 3838039..c5f2ae9 100644
--- a/av1/encoder/x86/temporal_filter_avx2.c
+++ b/av1/encoder/x86/temporal_filter_avx2.c
@@ -19,19 +19,19 @@
 #define SSE_STRIDE (BW + 2)
 
 #if EXPERIMENT_TEMPORAL_FILTER
-DECLARE_ALIGNED(32, const uint32_t, sse_bytemask[4][8]) = {
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask[4][8]) = {
   { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000 },
   { 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000 },
   { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000 },
   { 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }
 };
 
-DECLARE_ALIGNED(32, const uint8_t, shufflemask_16b[2][16]) = {
+DECLARE_ALIGNED(32, static const uint8_t, shufflemask_16b[2][16]) = {
   { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
   { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 10, 11, 10, 11 }
 };
 
-AOM_FORCE_INLINE void get_squared_error_16x16_avx2(
+static AOM_FORCE_INLINE void get_squared_error_16x16_avx2(
     uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int stride2,
     int block_width, int block_height, uint16_t *frame_sse,
     unsigned int sse_stride) {
@@ -59,7 +59,7 @@
   }
 }
 
-AOM_FORCE_INLINE void get_squared_error_32x32_avx2(
+static AOM_FORCE_INLINE void get_squared_error_32x32_avx2(
     uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int stride2,
     int block_width, int block_height, uint16_t *frame_sse,
     unsigned int sse_stride) {
@@ -94,8 +94,8 @@
   }
 }
 
-AOM_FORCE_INLINE __m256i xx_load_and_pad(uint16_t *src, int col,
-                                         int block_width) {
+static AOM_FORCE_INLINE __m256i xx_load_and_pad(uint16_t *src, int col,
+                                                int block_width) {
   __m128i v128tmp = _mm_loadu_si128((__m128i *)(src));
   if (col == 0) {
     // For the first column, replicate the first element twice to the left
@@ -108,7 +108,7 @@
   return _mm256_cvtepi16_epi32(v128tmp);
 }
 
-AOM_FORCE_INLINE int32_t xx_mask_and_hadd(__m256i vsum, int i) {
+static AOM_FORCE_INLINE int32_t xx_mask_and_hadd(__m256i vsum, int i) {
   // Mask the required 5 values inside the vector
   __m256i vtmp = _mm256_and_si256(vsum, *(__m256i *)sse_bytemask[i]);
   __m128i v128a, v128b;
diff --git a/av1/encoder/x86/temporal_filter_sse2.c b/av1/encoder/x86/temporal_filter_sse2.c
index b9ef6d5..c65e77e 100644
--- a/av1/encoder/x86/temporal_filter_sse2.c
+++ b/av1/encoder/x86/temporal_filter_sse2.c
@@ -21,16 +21,17 @@
 
 #if EXPERIMENT_TEMPORAL_FILTER
 
-DECLARE_ALIGNED(32, const uint32_t, sse_bytemask_2x4[4][2][4]) = {
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask_2x4[4][2][4]) = {
   { { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, { 0xFFFF, 0x0000, 0x0000, 0x0000 } },
   { { 0x0000, 0xFFFF, 0xFFFF, 0xFFFF }, { 0xFFFF, 0xFFFF, 0x0000, 0x0000 } },
   { { 0x0000, 0x0000, 0xFFFF, 0xFFFF }, { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000 } },
   { { 0x0000, 0x0000, 0x0000, 0xFFFF }, { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF } }
 };
 
-void get_squared_error(uint8_t *frame1, unsigned int stride, uint8_t *frame2,
-                       unsigned int stride2, int block_width, int block_height,
-                       uint16_t *frame_sse, unsigned int dst_stride) {
+static void get_squared_error(uint8_t *frame1, unsigned int stride,
+                              uint8_t *frame2, unsigned int stride2,
+                              int block_width, int block_height,
+                              uint16_t *frame_sse, unsigned int dst_stride) {
   uint8_t *src1 = frame1;
   uint8_t *src2 = frame2;
   uint16_t *dst = frame_sse;
@@ -68,7 +69,8 @@
   }
 }
 
-void xx_load_and_pad(uint16_t *src, __m128i *dstvec, int col, int block_width) {
+static void xx_load_and_pad(uint16_t *src, __m128i *dstvec, int col,
+                            int block_width) {
   __m128i vtmp = _mm_loadu_si128((__m128i *)src);
   __m128i vzero = _mm_setzero_si128();
   __m128i vtmp1 = _mm_unpacklo_epi16(vtmp, vzero);
@@ -79,7 +81,7 @@
   dstvec[1] = (col < block_width - 4) ? vtmp2 : _mm_shuffle_epi32(vtmp2, 0x54);
 }
 
-int32_t xx_mask_and_hadd(__m128i vsum1, __m128i vsum2, int i) {
+static int32_t xx_mask_and_hadd(__m128i vsum1, __m128i vsum2, int i) {
   __m128i veca, vecb;
   // Mask and obtain the required 5 values inside the vector
   veca = _mm_and_si128(vsum1, *(__m128i *)sse_bytemask_2x4[i][0]);