x86,cosmetics: remove unneeded casts w/xx_storel_32()

the destination parameter of xx_storel_32() is a void *, so the
(__m128i *) casts at the call sites are unnecessary

Change-Id: Ib76b26438693bb43258358243db1559052d0d232
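
For reference, a minimal sketch of what such a helper can look like; this
assumes a memcpy-based store, and the actual definition in libaom's
synonyms.h may differ:

    #include <emmintrin.h>  /* SSE2: __m128i, _mm_cvtsi128_si32 */
    #include <string.h>     /* memcpy */

    /* Store the low 32 bits of v to a. A void * destination lets callers
     * pass any pointer type (uint8_t *, uint16_t *, ...) without a cast,
     * and memcpy sidesteps strict-aliasing and alignment concerns while
     * still compiling down to a single 32-bit store. */
    static inline void xx_storel_32(void *const a, const __m128i v) {
      const int val = _mm_cvtsi128_si32(v);
      memcpy(a, &val, sizeof(val));
    }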
diff --git a/aom_dsp/x86/highbd_convolve_avx2.c b/aom_dsp/x86/highbd_convolve_avx2.c
index fdf9524..8361e2f 100644
--- a/aom_dsp/x86/highbd_convolve_avx2.c
+++ b/aom_dsp/x86/highbd_convolve_avx2.c
@@ -165,9 +165,9 @@
           res_a_round = _mm256_min_epi16(res_a_round, clip_pixel);
           res_a_round = _mm256_max_epi16(res_a_round, zero);
 
-          xx_storel_32((__m128i *)&dst[i * dst_stride + j],
+          xx_storel_32(&dst[i * dst_stride + j],
                        _mm256_castsi256_si128(res_a_round));
-          xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
+          xx_storel_32(&dst[i * dst_stride + j + dst_stride],
                        _mm256_extracti128_si256(res_a_round, 1));
         }
 
@@ -275,9 +275,8 @@
         _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
                          _mm256_extracti128_si256(res, 1));
       } else {
-        xx_storel_32((__m128i *)&dst[i * dst_stride + j],
-                     _mm256_castsi256_si128(res));
-        xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
+        xx_storel_32(&dst[i * dst_stride + j], _mm256_castsi256_si128(res));
+        xx_storel_32(&dst[i * dst_stride + j + dst_stride],
                      _mm256_extracti128_si256(res, 1));
       }
     }
diff --git a/aom_dsp/x86/masked_variance_intrin_ssse3.c b/aom_dsp/x86/masked_variance_intrin_ssse3.c
index 6939aa4..0bf383f 100644
--- a/aom_dsp/x86/masked_variance_intrin_ssse3.c
+++ b/aom_dsp/x86/masked_variance_intrin_ssse3.c
@@ -312,7 +312,7 @@
     uint8_t *b = dst;
     for (i = 0; i < h + 1; ++i) {
       __m128i x = xx_loadl_32((__m128i *)src);
-      xx_storel_32((__m128i *)b, x);
+      xx_storel_32(b, x);
       src += src_stride;
       b += 4;
     }
@@ -321,7 +321,7 @@
     for (i = 0; i < h + 1; ++i) {
       __m128i x = _mm_loadl_epi64((__m128i *)src);
       __m128i z = _mm_srli_si128(x, 1);
-      xx_storel_32((__m128i *)b, _mm_avg_epu8(x, z));
+      xx_storel_32(b, _mm_avg_epu8(x, z));
       src += src_stride;
       b += 4;
     }
@@ -357,7 +357,7 @@
     v0 = _mm_maddubs_epi16(v0, hfilter_vec);
     v0 = xx_roundn_epu16(v0, FILTER_BITS);
 
-    xx_storel_32((__m128i *)b, _mm_packus_epi16(v0, v0));
+    xx_storel_32(b, _mm_packus_epi16(v0, v0));
   }
 
   // Vertical filter
@@ -367,7 +367,7 @@
     for (i = 0; i < h; ++i) {
       __m128i x = xx_loadl_32((__m128i *)dst);
       __m128i y = xx_loadl_32((__m128i *)&dst[4]);
-      xx_storel_32((__m128i *)dst, _mm_avg_epu8(x, y));
+      xx_storel_32(dst, _mm_avg_epu8(x, y));
       dst += 4;
     }
   } else {
diff --git a/av1/common/x86/highbd_convolve_2d_avx2.c b/av1/common/x86/highbd_convolve_2d_avx2.c
index 429f8f1..de850ee 100644
--- a/av1/common/x86/highbd_convolve_2d_avx2.c
+++ b/av1/common/x86/highbd_convolve_2d_avx2.c
@@ -181,9 +181,9 @@
           res_a_round = _mm256_min_epi16(res_a_round, clip_pixel);
           res_a_round = _mm256_max_epi16(res_a_round, zero);
 
-          xx_storel_32((__m128i *)&dst[i * dst_stride + j],
+          xx_storel_32(&dst[i * dst_stride + j],
                        _mm256_castsi256_si128(res_a_round));
-          xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
+          xx_storel_32(&dst[i * dst_stride + j + dst_stride],
                        _mm256_extracti128_si256(res_a_round, 1));
         }