Merge "Port convolve test refactor to master."
diff --git a/vpx_dsp/bitreader.c b/vpx_dsp/bitreader.c
index 6ad806a..8140e78 100644
--- a/vpx_dsp/bitreader.c
+++ b/vpx_dsp/bitreader.c
@@ -69,7 +69,7 @@
buffer += (bits >> 3);
value = r->value | (nv << (shift & 0x7));
} else {
- const int bits_over = (int)(shift + CHAR_BIT - bits_left);
+ const int bits_over = (int)(shift + CHAR_BIT - (int)bits_left);
int loop_end = 0;
if (bits_over >= 0) {
count += LOTS_OF_BITS;
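Note on the bitreader change: bits_left is a size_t in vpx_reader_fill, so in the old expression the int operands of shift + CHAR_BIT - bits_left were converted to size_t and the subtraction wrapped to a huge positive value whenever the true result was negative; converting that out-of-range value back to int is implementation-defined (and draws conversion warnings), even though common compilers happen to recover the intended negative number. Casting bits_left to int first keeps the whole computation in signed arithmetic. A minimal standalone sketch of the difference (the chosen values are hypothetical, not taken from the decoder):

#include <limits.h>  /* CHAR_BIT */
#include <stddef.h>
#include <stdio.h>

int main(void) {
  const int shift = -16;      /* hypothetical: more bits consumed than remain */
  const size_t bits_left = 8; /* unsigned, as in vpx_reader_fill */

  /* Old form: the int operands convert to size_t, so the intermediate
   * subtraction wraps to a huge positive value instead of -16. */
  const size_t wrapped = shift + CHAR_BIT - bits_left;

  /* New form: every operand is int, so the result is -16 by the rules of
   * signed arithmetic, with no out-of-range conversion involved. */
  const int bits_over = (int)(shift + CHAR_BIT - (int)bits_left);

  printf("wrapped intermediate = %zu, bits_over = %d\n", wrapped, bits_over);
  return 0;
}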
diff --git a/vpx_dsp/x86/variance_sse2.c b/vpx_dsp/x86/variance_sse2.c
index e6c9365..43f4603 100644
--- a/vpx_dsp/x86/variance_sse2.c
+++ b/vpx_dsp/x86/variance_sse2.c
@@ -171,7 +171,7 @@
unsigned int *sse) {
int sum;
get4x4var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
- return *sse - (((unsigned int)sum * sum) >> 4);
+ return *sse - ((sum * sum) >> 4);
}
unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
@@ -180,7 +180,7 @@
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 8, 4,
sse, &sum, get4x4var_sse2, 4);
- return *sse - (((unsigned int)sum * sum) >> 5);
+ return *sse - ((sum * sum) >> 5);
}
unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
@@ -189,7 +189,7 @@
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 4, 8,
sse, &sum, get4x4var_sse2, 4);
- return *sse - (((unsigned int)sum * sum) >> 5);
+ return *sse - ((sum * sum) >> 5);
}
unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride,
@@ -197,7 +197,7 @@
unsigned int *sse) {
int sum;
vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
- return *sse - (((unsigned int)sum * sum) >> 6);
+ return *sse - ((sum * sum) >> 6);
}
unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride,
@@ -206,7 +206,7 @@
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 16, 8,
sse, &sum, vpx_get8x8var_sse2, 8);
- return *sse - (((unsigned int)sum * sum) >> 7);
+ return *sse - ((sum * sum) >> 7);
}
unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride,
@@ -215,7 +215,7 @@
int sum;
variance_sse2(src, src_stride, ref, ref_stride, 8, 16,
sse, &sum, vpx_get8x8var_sse2, 8);
- return *sse - (((unsigned int)sum * sum) >> 7);
+ return *sse - ((sum * sum) >> 7);
}
unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride,
@@ -223,7 +223,7 @@
unsigned int *sse) {
int sum;
vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
- return *sse - (((unsigned int)sum * sum) >> 8);
+ return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8);
}
unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
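A note on why the scalar variance changes above go in two directions: the worst-case |sum| for a WxH block of 8-bit pixels is W*H*255, so through 16x8 and 8x16 (|sum| <= 32640) the product sum * sum is at most 1,065,369,600 and fits a 32-bit int, making the old (unsigned int) casts unnecessary; at 16x16 (|sum| <= 65280) the product can reach 4,261,478,400, which overflows a signed 32-bit multiply (undefined behavior) but still fits uint32_t, hence the widen-multiply-then-truncate form. A standalone check of those bounds (illustrative code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int sum16x8 = 16 * 8 * 255;    /* 32640: worst-case |sum| for 16x8 */
  const int sum16x16 = 16 * 16 * 255;  /* 65280: worst-case |sum| for 16x16 */

  /* 32640^2 = 1065369600 < INT32_MAX, so a plain int multiply is exact
   * and the dropped (unsigned int) casts never changed the result. */
  printf("16x8:  sum*sum = %d\n", sum16x8 * sum16x8);

  /* 65280^2 = 4261478400 > INT32_MAX: int * int would overflow, so widen
   * one operand to int64_t, truncate the (in-range) product to uint32_t,
   * then shift, exactly as the 16x16 return statement now does. */
  const uint32_t term = ((uint32_t)((int64_t)sum16x16 * sum16x16)) >> 8;
  printf("16x16: (sum*sum) >> 8 = %u\n", term); /* 16646400 */
  return 0;
}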
@@ -329,7 +329,7 @@
#undef DECLS
#undef DECL
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
+#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src, \
int src_stride, \
int x_offset, \
@@ -365,23 +365,23 @@
} \
} \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ return sse - (cast_prod (cast se * se) >> (wlog2 + hlog2)); \
}
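The macro change splits the old single cast parameter in two: cast widens one operand before the multiply so se * se cannot overflow, while cast_prod is applied to the finished product so the right shift happens on a type that is wide enough and, where possible, unsigned. A hand-expanded sketch of the 16x16 instantiation (the function name is illustrative, not a real symbol):

#include <stdint.h>

/* Roughly what FN(16, 16, 16, 4, 4, sse2, (uint32_t), (int64_t)) produces
 * for the return statement; the same split applies to the avg variant
 * later in this file. */
static unsigned int variance_term_16x16(unsigned int sse, int se) {
  /* cast = (int64_t): the multiply happens in 64 bits, no signed overflow.
   * cast_prod = (uint32_t): the product fits 32 unsigned bits, and the
   * shift is then a well-defined logical shift on an unsigned value. */
  return sse - ((uint32_t)((int64_t)se * se) >> (4 + 4));
}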
#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
-FN(16, 8, 16, 4, 3, opt1, (uint32_t)); \
-FN(8, 16, 8, 3, 4, opt1, (uint32_t)); \
-FN(8, 8, 8, 3, 3, opt1, (uint32_t)); \
-FN(8, 4, 8, 3, 2, opt1, (uint32_t)); \
-FN(4, 8, 4, 2, 3, opt2, (uint32_t)); \
-FN(4, 4, 4, 2, 2, opt2, (uint32_t))
+FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
+FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
+FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
+FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
+FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
+FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
+FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
+FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \
+FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \
+FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \
+FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \
+FN(4, 8, 4, 2, 3, opt2, (uint32_t), (int32_t)); \
+FN(4, 4, 4, 2, 2, opt2, (uint32_t), (int32_t))
FNS(sse2, sse);
FNS(ssse3, ssse3);
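The (cast_prod, cast) pairs track the worst-case magnitude of se * se, where |se| <= W*H*255: for 16x8 and smaller the product fits int32_t and cast_prod (uint32_t) merely makes the shift unsigned; for 16x16 the product overflows int32_t but fits uint32_t, so the multiply is widened to int64_t and the result truncated back; for 32x16 and larger even the product exceeds 32 bits, so both casts stay int64_t, because truncating before the shift would discard high bits. A small table-driven check of those thresholds (illustrative, assuming 8-bit input samples):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const struct { int w, h; } sizes[] = { { 16, 8 }, { 16, 16 }, { 32, 16 } };
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
    const int64_t se = (int64_t)sizes[i].w * sizes[i].h * 255; /* worst case */
    const int64_t prod = se * se;
    printf("%2dx%-2d  se*se = %13lld  -> %s\n", sizes[i].w, sizes[i].h,
           (long long)prod,
           prod <= INT32_MAX    ? "(uint32_t), (int32_t)"
           : prod <= UINT32_MAX ? "(uint32_t), (int64_t)"
                                : "(int64_t), (int64_t)");
  }
  return 0;
}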
@@ -410,7 +410,7 @@
#undef DECL
#undef DECLS
-#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
+#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \
unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(const uint8_t *src, \
int src_stride, \
int x_offset, \
@@ -451,23 +451,23 @@
} \
} \
*sseptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ return sse - (cast_prod (cast se * se) >> (wlog2 + hlog2)); \
}
#define FNS(opt1, opt2) \
-FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
-FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
-FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
-FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
-FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
-FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
-FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \
-FN(16, 8, 16, 4, 3, opt1, (uint32_t)); \
-FN(8, 16, 8, 3, 4, opt1, (uint32_t)); \
-FN(8, 8, 8, 3, 3, opt1, (uint32_t)); \
-FN(8, 4, 8, 3, 2, opt1, (uint32_t)); \
-FN(4, 8, 4, 2, 3, opt2, (uint32_t)); \
-FN(4, 4, 4, 2, 2, opt2, (uint32_t))
+FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \
+FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \
+FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \
+FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \
+FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \
+FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \
+FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \
+FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \
+FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \
+FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \
+FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \
+FN(4, 8, 4, 2, 3, opt2, (uint32_t), (int32_t)); \
+FN(4, 4, 4, 2, 2, opt2, (uint32_t), (int32_t))
FNS(sse2, sse);
FNS(ssse3, ssse3);