Change calcGCD() to use the int64_t type

If `a` is of the int32_t type and equal to INT_MIN, then `-a` cannot be
represented in int32_t (assuming two's complement). To allow us to negate
INT_MIN, use the int64_t type instead.

Fix https://crbug.com/oss-fuzz/48135.
diff --git a/src/avif.c b/src/avif.c
index 739066c..5ee6ea6 100644
--- a/src/avif.c
+++ b/src/avif.c
@@ -456,7 +456,9 @@
     return f;
 }
 
-static int32_t calcGCD(int32_t a, int32_t b)
+// |a| and |b| hold int32_t values. The int64_t type is used so that we can negate INT_MIN without
+// overflowing int32_t.
+static int64_t calcGCD(int64_t a, int64_t b)
 {
     if (a < 0) {
         a *= -1;
@@ -466,7 +468,7 @@
     }
     while (a > 0) {
         if (a < b) {
-            int32_t t = a;
+            int64_t t = a;
             a = b;
             b = t;
         }
@@ -477,7 +479,7 @@
 
 static void clapFractionSimplify(clapFraction * f)
 {
-    int32_t gcd = calcGCD(f->n, f->d);
+    int64_t gcd = calcGCD(f->n, f->d);
     if (gcd > 1) {
         f->n /= gcd;
         f->d /= gcd;