Adjust .clang-format and re-run clang-format on src/
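
The .clang-format edits themselves are not included in this diff; only the
resulting re-format of src/ is shown. As an illustrative sketch of the kind of
settings that would produce these hunks (the option names are real
clang-format options, but the values below are assumptions, not the project's
actual configuration):

    # illustrative only; the real .clang-format may differ
    ColumnLimit: 132                            # wrap long calls and conditions
    AllowShortCaseLabelsOnASingleLine: false    # "case X: ...; break;" spans multiple lines
    AllowShortIfStatementsOnASingleLine: false  # "if (x) return y;" becomes two lines
    AlignTrailingComments: true                 # line up end-of-line spec comments

The net effect is formatting-only: short case labels and if bodies move onto
their own lines, long expressions wrap at the column limit, and trailing
comments are aligned or split onto continuation lines. The only non-whitespace
edits are two relocated or removed comments (codec_aom.c, codec_dav1d.c) and
the local plane/rowBytes aliases added in reformat.c to keep lines under the
limit; behavior is unchanged.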
diff --git a/src/avif.c b/src/avif.c
index 461a7a5..18dc775 100644
--- a/src/avif.c
+++ b/src/avif.c
@@ -17,10 +17,14 @@
 const char * avifPixelFormatToString(avifPixelFormat format)
 {
     switch (format) {
-        case AVIF_PIXEL_FORMAT_YUV444: return "YUV444";
-        case AVIF_PIXEL_FORMAT_YUV420: return "YUV420";
-        case AVIF_PIXEL_FORMAT_YUV422: return "YUV422";
-        case AVIF_PIXEL_FORMAT_YV12: return "YV12";
+        case AVIF_PIXEL_FORMAT_YUV444:
+            return "YUV444";
+        case AVIF_PIXEL_FORMAT_YUV420:
+            return "YUV420";
+        case AVIF_PIXEL_FORMAT_YUV422:
+            return "YUV422";
+        case AVIF_PIXEL_FORMAT_YV12:
+            return "YV12";
         case AVIF_PIXEL_FORMAT_NONE:
         default:
             break;
diff --git a/src/codec_aom.c b/src/codec_aom.c
index 40ffa58..019865c 100644
--- a/src/codec_aom.c
+++ b/src/codec_aom.c
@@ -54,7 +54,7 @@
     }
 
     aom_codec_iter_t iter = NULL;
-    codec->internal->images[planes] = aom_codec_get_frame(&codec->internal->decoders[planes], &iter); // It doesn't appear that I own this / need to free this
+    codec->internal->images[planes] = aom_codec_get_frame(&codec->internal->decoders[planes], &iter);
     return (codec->internal->images[planes]) ? AVIF_TRUE : AVIF_FALSE;
 }
 
@@ -228,10 +228,18 @@
             cfg.g_profile = 0;
         } else {
             switch (image->yuvFormat) {
-                case AVIF_PIXEL_FORMAT_YUV444: cfg.g_profile = 1; break;
-                case AVIF_PIXEL_FORMAT_YUV422: cfg.g_profile = 2; break;
-                case AVIF_PIXEL_FORMAT_YUV420: cfg.g_profile = 0; break;
-                case AVIF_PIXEL_FORMAT_YV12: cfg.g_profile = 0; break;
+                case AVIF_PIXEL_FORMAT_YUV444:
+                    cfg.g_profile = 1;
+                    break;
+                case AVIF_PIXEL_FORMAT_YUV422:
+                    cfg.g_profile = 2;
+                    break;
+                case AVIF_PIXEL_FORMAT_YUV420:
+                    cfg.g_profile = 0;
+                    break;
+                case AVIF_PIXEL_FORMAT_YV12:
+                    cfg.g_profile = 0;
+                    break;
                 case AVIF_PIXEL_FORMAT_NONE:
                 default:
                     break;
diff --git a/src/codec_dav1d.c b/src/codec_dav1d.c
index 64212e8..8916636 100644
--- a/src/codec_dav1d.c
+++ b/src/codec_dav1d.c
@@ -40,7 +40,9 @@
     Dav1dData dav1dData;
     uint8_t * dav1dDataPtr = dav1d_data_create(&dav1dData, obu->size);
     memcpy(dav1dDataPtr, obu->data, obu->size);
-    if (dav1d_send_data(codec->internal->dav1dContext[planes], &dav1dData) != 0) { // This could return DAV1D_ERR(EAGAIN) and not be a failure if we weren't sending the entire payload
+
+    if (dav1d_send_data(codec->internal->dav1dContext[planes], &dav1dData) != 0) {
+        // This could return DAV1D_ERR(EAGAIN) and not be a failure if we weren't sending the entire payload
         goto cleanup;
     }
 
@@ -66,7 +68,8 @@
 
 static avifBool dav1dCodecAlphaLimitedRange(avifCodec * codec)
 {
-    if (codec->internal->hasPicture[AVIF_CODEC_PLANES_ALPHA] && (codec->internal->colorRange[AVIF_CODEC_PLANES_ALPHA] == AVIF_RANGE_LIMITED)) {
+    if (codec->internal->hasPicture[AVIF_CODEC_PLANES_ALPHA] &&
+        (codec->internal->colorRange[AVIF_CODEC_PLANES_ALPHA] == AVIF_RANGE_LIMITED)) {
         return AVIF_TRUE;
     }
     return AVIF_FALSE;
diff --git a/src/colr.c b/src/colr.c
index ef29e17..15ff05e 100644
--- a/src/colr.c
+++ b/src/colr.c
@@ -48,14 +48,9 @@
 
 static avifBool primariesMatch(const float p1[8], const float p2[8])
 {
-    return matchesTo3RoundedPlaces(p1[0], p2[0]) &&
-           matchesTo3RoundedPlaces(p1[1], p2[1]) &&
-           matchesTo3RoundedPlaces(p1[2], p2[2]) &&
-           matchesTo3RoundedPlaces(p1[3], p2[3]) &&
-           matchesTo3RoundedPlaces(p1[4], p2[4]) &&
-           matchesTo3RoundedPlaces(p1[5], p2[5]) &&
-           matchesTo3RoundedPlaces(p1[6], p2[6]) &&
-           matchesTo3RoundedPlaces(p1[7], p2[7]);
+    return matchesTo3RoundedPlaces(p1[0], p2[0]) && matchesTo3RoundedPlaces(p1[1], p2[1]) &&
+           matchesTo3RoundedPlaces(p1[2], p2[2]) && matchesTo3RoundedPlaces(p1[3], p2[3]) && matchesTo3RoundedPlaces(p1[4], p2[4]) &&
+           matchesTo3RoundedPlaces(p1[5], p2[5]) && matchesTo3RoundedPlaces(p1[6], p2[6]) && matchesTo3RoundedPlaces(p1[7], p2[7]);
 }
 
 avifNclxColourPrimaries avifNclxColourPrimariesFind(float inPrimaries[8], const char ** outName)
diff --git a/src/read.c b/src/read.c
index 26d64f8..0612650 100644
--- a/src/read.c
+++ b/src/read.c
@@ -123,8 +123,10 @@
 
 static avifBool isAlphaURN(char * urn)
 {
-    if (!strcmp(urn, URN_ALPHA0)) return AVIF_TRUE;
-    if (!strcmp(urn, URN_ALPHA1)) return AVIF_TRUE;
+    if (!strcmp(urn, URN_ALPHA0))
+        return AVIF_TRUE;
+    if (!strcmp(urn, URN_ALPHA1))
+        return AVIF_TRUE;
     return AVIF_FALSE;
 }
 
@@ -215,10 +217,15 @@
         data->properties[propertyIndex].colr.icc = avifStreamCurrent(&s);
         data->properties[propertyIndex].colr.iccSize = avifStreamRemainingBytes(&s);
     } else if (!memcmp(colourType, "nclx", 4)) {
-        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.colourPrimaries));         // unsigned int(16) colour_primaries;
-        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.transferCharacteristics)); // unsigned int(16) transfer_characteristics;
-        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.matrixCoefficients));      // unsigned int(16) matrix_coefficients;
-        CHECK(avifStreamRead(&s, &data->properties[propertyIndex].colr.nclx.fullRangeFlag, 1));           // unsigned int(1) full_range_flag; unsigned int(7) reserved = 0;
+        // unsigned int(16) colour_primaries;
+        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.colourPrimaries));
+        // unsigned int(16) transfer_characteristics;
+        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.transferCharacteristics));
+        // unsigned int(16) matrix_coefficients;
+        CHECK(avifStreamReadU16(&s, &data->properties[propertyIndex].colr.nclx.matrixCoefficients));
+        // unsigned int(1) full_range_flag;
+        // unsigned int(7) reserved = 0;
+        CHECK(avifStreamRead(&s, &data->properties[propertyIndex].colr.nclx.fullRangeFlag, 1));
         data->properties[propertyIndex].colr.nclx.fullRangeFlag |= 0x80;
         data->properties[propertyIndex].colr.format = AVIF_PROFILE_FORMAT_NCLX;
     }
@@ -681,8 +688,10 @@
         }
     }
 
-    if ((colorOBUItem && colorOBUItem->ispePresent && ((colorOBUItem->ispe.width != colorPlanesSize.width) || (colorOBUItem->ispe.height != colorPlanesSize.height))) ||
-        (alphaOBUItem && alphaOBUItem->ispePresent && ((alphaOBUItem->ispe.width != alphaPlanesSize.width) || (alphaOBUItem->ispe.height != alphaPlanesSize.height)))) {
+    if ((colorOBUItem && colorOBUItem->ispePresent &&
+         ((colorOBUItem->ispe.width != colorPlanesSize.width) || (colorOBUItem->ispe.height != colorPlanesSize.height))) ||
+        (alphaOBUItem && alphaOBUItem->ispePresent &&
+         ((alphaOBUItem->ispe.width != alphaPlanesSize.width) || (alphaOBUItem->ispe.height != alphaPlanesSize.height)))) {
         avifCodecDestroy(codec);
         return AVIF_RESULT_ISPE_SIZE_MISMATCH;
     }
diff --git a/src/reformat.c b/src/reformat.c
index a45ce19..fbe99f3 100644
--- a/src/reformat.c
+++ b/src/reformat.c
@@ -71,6 +71,10 @@
     struct YUVBlock yuvBlock[2][2];
     float rgbPixel[3];
     float maxChannel = (float)((1 << image->depth) - 1);
+    uint8_t ** rgbPlanes = image->rgbPlanes;
+    uint32_t * rgbRowBytes = image->rgbRowBytes;
+    uint8_t ** yuvPlanes = image->yuvPlanes;
+    uint32_t * yuvRowBytes = image->yuvRowBytes;
     for (int outerJ = 0; outerJ < image->height; outerJ += 2) {
         for (int outerI = 0; outerI < image->width; outerI += 2) {
             int blockW = 2, blockH = 2;
@@ -89,13 +93,13 @@
 
                     // Unpack RGB into normalized float
                     if (state.usesU16) {
-                        rgbPixel[0] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_R])])) / maxChannel;
-                        rgbPixel[1] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_G])])) / maxChannel;
-                        rgbPixel[2] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_B])])) / maxChannel;
+                        rgbPixel[0] = *((uint16_t *)(&rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_R])])) / maxChannel;
+                        rgbPixel[1] = *((uint16_t *)(&rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_G])])) / maxChannel;
+                        rgbPixel[2] = *((uint16_t *)(&rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_B])])) / maxChannel;
                     } else {
-                        rgbPixel[0] = image->rgbPlanes[AVIF_CHAN_R][i + (j * image->rgbRowBytes[AVIF_CHAN_R])] / maxChannel;
-                        rgbPixel[1] = image->rgbPlanes[AVIF_CHAN_G][i + (j * image->rgbRowBytes[AVIF_CHAN_G])] / maxChannel;
-                        rgbPixel[2] = image->rgbPlanes[AVIF_CHAN_B][i + (j * image->rgbRowBytes[AVIF_CHAN_B])] / maxChannel;
+                        rgbPixel[0] = rgbPlanes[AVIF_CHAN_R][i + (j * rgbRowBytes[AVIF_CHAN_R])] / maxChannel;
+                        rgbPixel[1] = rgbPlanes[AVIF_CHAN_G][i + (j * rgbRowBytes[AVIF_CHAN_G])] / maxChannel;
+                        rgbPixel[2] = rgbPlanes[AVIF_CHAN_B][i + (j * rgbRowBytes[AVIF_CHAN_B])] / maxChannel;
                     }
 
                     // RGB -> YUV conversion
@@ -105,21 +109,24 @@
                     yuvBlock[bI][bJ].v = (rgbPixel[0] - Y) / (2 * (1 - kr));
 
                     if (state.usesU16) {
-                        uint16_t * pY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * image->yuvRowBytes[AVIF_CHAN_Y])];
+                        uint16_t * pY = (uint16_t *)&yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_Y])];
                         *pY = (uint16_t)yuvToUNorm(AVIF_CHAN_Y, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].y);
                         if (!state.formatInfo.chromaShiftX && !state.formatInfo.chromaShiftY) {
                             // YUV444, full chroma
-                            uint16_t * pU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(i * 2) + (j * image->yuvRowBytes[AVIF_CHAN_U])];
+                            uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_U])];
                             *pU = (uint16_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].u);
-                            uint16_t * pV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(i * 2) + (j * image->yuvRowBytes[AVIF_CHAN_V])];
+                            uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(i * 2) + (j * yuvRowBytes[AVIF_CHAN_V])];
                             *pV = (uint16_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].v);
                         }
                     } else {
-                        image->yuvPlanes[AVIF_CHAN_Y][i + (j * image->yuvRowBytes[AVIF_CHAN_Y])] = (uint8_t)yuvToUNorm(AVIF_CHAN_Y, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].y);
+                        yuvPlanes[AVIF_CHAN_Y][i + (j * yuvRowBytes[AVIF_CHAN_Y])] =
+                            (uint8_t)yuvToUNorm(AVIF_CHAN_Y, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].y);
                         if (!state.formatInfo.chromaShiftX && !state.formatInfo.chromaShiftY) {
                             // YUV444, full chroma
-                            image->yuvPlanes[AVIF_CHAN_U][i + (j * image->yuvRowBytes[AVIF_CHAN_U])] = (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].u);
-                            image->yuvPlanes[AVIF_CHAN_V][i + (j * image->yuvRowBytes[AVIF_CHAN_V])] = (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].v);
+                            yuvPlanes[AVIF_CHAN_U][i + (j * yuvRowBytes[AVIF_CHAN_U])] =
+                                (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].u);
+                            yuvPlanes[AVIF_CHAN_V][i + (j * yuvRowBytes[AVIF_CHAN_V])] =
+                                (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, yuvBlock[bI][bJ].v);
                         }
                     }
                 }
@@ -144,13 +151,15 @@
                 int uvI = outerI >> state.formatInfo.chromaShiftX;
                 int uvJ = outerJ >> state.formatInfo.chromaShiftY;
                 if (state.usesU16) {
-                    uint16_t * pU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
+                    uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
                     *pU = (uint16_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
-                    uint16_t * pV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
+                    uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
                     *pV = (uint16_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
                 } else {
-                    image->yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])] = (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
-                    image->yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])] = (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
+                    yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
+                        (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
+                    yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
+                        (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
                 }
             } else if (state.formatInfo.chromaShiftX && !state.formatInfo.chromaShiftY) {
                 // YUV422, average 2 samples (1x2), twice
@@ -169,13 +178,15 @@
                     int uvI = outerI >> state.formatInfo.chromaShiftX;
                     int uvJ = outerJ + bJ;
                     if (state.usesU16) {
-                        uint16_t * pU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
+                        uint16_t * pU = (uint16_t *)&yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_U])];
                         *pU = (uint16_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
-                        uint16_t * pV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
+                        uint16_t * pV = (uint16_t *)&yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * yuvRowBytes[AVIF_CHAN_V])];
                         *pV = (uint16_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
                     } else {
-                        image->yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])] = (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
-                        image->yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])] = (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
+                        yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_U])] =
+                            (uint8_t)yuvToUNorm(AVIF_CHAN_U, image->yuvRange, image->depth, maxChannel, avgU);
+                        yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * yuvRowBytes[AVIF_CHAN_V])] =
+                            (uint8_t)yuvToUNorm(AVIF_CHAN_V, image->yuvRange, image->depth, maxChannel, avgV);
                     }
                 }
             }
@@ -248,11 +259,11 @@
             rgbPixel[2] = AVIF_CLAMP(B, 0.0f, 1.0f);
 
             if (state.usesU16) {
-                uint16_t * pR = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_R])];
+                uint16_t * pR = (uint16_t *)&rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_R])];
                 *pR = (uint16_t)avifRoundf(rgbPixel[0] * maxChannel);
-                uint16_t * pG = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_G])];
+                uint16_t * pG = (uint16_t *)&rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_G])];
                 *pG = (uint16_t)avifRoundf(rgbPixel[1] * maxChannel);
-                uint16_t * pB = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_B])];
+                uint16_t * pB = (uint16_t *)&rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * rgbRowBytes[AVIF_CHAN_B])];
                 *pB = (uint16_t)avifRoundf(rgbPixel[2] * maxChannel);
             } else {
                 image->rgbPlanes[AVIF_CHAN_R][i + (j * image->rgbRowBytes[AVIF_CHAN_R])] = (uint8_t)avifRoundf(rgbPixel[0] * maxChannel);
diff --git a/src/utils.c b/src/utils.c
index 7529868..f05205f 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -72,5 +72,6 @@
     uint8_t data[8];
     memcpy(&data, &l, sizeof(data));
 
-    return ((uint64_t)data[7] << 0) | ((uint64_t)data[6] << 8) | ((uint64_t)data[5] << 16) | ((uint64_t)data[4] << 24) | ((uint64_t)data[3] << 32) | ((uint64_t)data[2] << 40) | ((uint64_t)data[1] << 48) | ((uint64_t)data[0] << 56);
+    return ((uint64_t)data[7] << 0) | ((uint64_t)data[6] << 8) | ((uint64_t)data[5] << 16) | ((uint64_t)data[4] << 24) |
+           ((uint64_t)data[3] << 32) | ((uint64_t)data[2] << 40) | ((uint64_t)data[1] << 48) | ((uint64_t)data[0] << 56);
 }
diff --git a/src/write.c b/src/write.c
index a5fa05e..e24765b 100644
--- a/src/write.c
+++ b/src/write.c
@@ -66,7 +66,8 @@
         goto writeCleanup;
     }
 
-    if ((image->yuvFormat == AVIF_PIXEL_FORMAT_NONE) || !image->yuvPlanes[AVIF_CHAN_Y] || !image->yuvPlanes[AVIF_CHAN_U] || !image->yuvPlanes[AVIF_CHAN_V]) {
+    if ((image->yuvFormat == AVIF_PIXEL_FORMAT_NONE) || !image->yuvPlanes[AVIF_CHAN_Y] || !image->yuvPlanes[AVIF_CHAN_U] ||
+        !image->yuvPlanes[AVIF_CHAN_V]) {
         if (!image->rgbPlanes[AVIF_CHAN_R] || !image->rgbPlanes[AVIF_CHAN_G] || !image->rgbPlanes[AVIF_CHAN_B]) {
             result = AVIF_RESULT_NO_CONTENT;
             goto writeCleanup;
@@ -99,16 +100,16 @@
     // Write ftyp
 
     avifBoxMarker ftyp = avifStreamWriteBox(&s, "ftyp", -1, 0);
-    avifStreamWriteChars(&s, "avif", 4); // unsigned int(32) major_brand;
-    avifStreamWriteU32(&s, 0);           // unsigned int(32) minor_version;
-    avifStreamWriteChars(&s, "avif", 4); // unsigned int(32) compatible_brands[];
-    avifStreamWriteChars(&s, "mif1", 4); // ... compatible_brands[]
-    avifStreamWriteChars(&s, "miaf", 4); // ... compatible_brands[]
-    if ((image->depth == 8) || (image->depth == 10)) {
-        if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420) {
-            avifStreamWriteChars(&s, "MA1B", 4); // ... compatible_brands[]
-        } else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
-            avifStreamWriteChars(&s, "MA1A", 4); // ... compatible_brands[]
+    avifStreamWriteChars(&s, "avif", 4);                           // unsigned int(32) major_brand;
+    avifStreamWriteU32(&s, 0);                                     // unsigned int(32) minor_version;
+    avifStreamWriteChars(&s, "avif", 4);                           // unsigned int(32) compatible_brands[];
+    avifStreamWriteChars(&s, "mif1", 4);                           // ... compatible_brands[]
+    avifStreamWriteChars(&s, "miaf", 4);                           // ... compatible_brands[]
+    if ((image->depth == 8) || (image->depth == 10)) {             //
+        if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420) {        //
+            avifStreamWriteChars(&s, "MA1B", 4);                   // ... compatible_brands[]
+        } else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) { //
+            avifStreamWriteChars(&s, "MA1A", 4);                   // ... compatible_brands[]
         }
     }
     avifStreamFinishBox(&s, ftyp);
@@ -144,9 +145,11 @@
     avifBoxMarker iloc = avifStreamWriteBox(&s, "iloc", 0, 0);
 
     // iloc header
-    uint8_t offsetSizeAndLengthSize = (4 << 4) + (4 << 0); // unsigned int(4) offset_size; unsigned int(4) length_size;
+    uint8_t offsetSizeAndLengthSize = (4 << 4) + (4 << 0); // unsigned int(4) offset_size;
+                                                           // unsigned int(4) length_size;
     avifStreamWrite(&s, &offsetSizeAndLengthSize, 1);      //
-    avifStreamWriteZeros(&s, 1);                           // unsigned int(4) base_offset_size; unsigned int(4) reserved;
+    avifStreamWriteZeros(&s, 1);                           // unsigned int(4) base_offset_size;
+                                                           // unsigned int(4) reserved;
     avifStreamWriteU16(&s, hasAlpha ? 2 : 1);              // unsigned int(16) item_count;
 
     // Item ID #1 (Color OBU)
@@ -230,7 +233,8 @@
                 avifStreamWriteU16(&s, image->nclx.colourPrimaries);         // unsigned int(16) colour_primaries;
                 avifStreamWriteU16(&s, image->nclx.transferCharacteristics); // unsigned int(16) transfer_characteristics;
                 avifStreamWriteU16(&s, image->nclx.matrixCoefficients);      // unsigned int(16) matrix_coefficients;
-                avifStreamWriteU8(&s, image->nclx.fullRangeFlag & 0x80);     // unsigned int(1) full_range_flag; unsigned int(7) reserved = 0;
+                avifStreamWriteU8(&s, image->nclx.fullRangeFlag & 0x80);     // unsigned int(1) full_range_flag;
+                                                                             // unsigned int(7) reserved = 0;
                 avifStreamFinishBox(&s, colr);
                 ++ipcoIndex;
                 ipmaPush(&ipmaColor, ipcoIndex);
@@ -286,16 +290,16 @@
             int ipmaCount = hasAlpha ? 2 : 1;
             avifStreamWriteU32(&s, ipmaCount); // unsigned int(32) entry_count;
 
-            avifStreamWriteU16(&s, 1);              // unsigned int(16) item_ID;
-            avifStreamWriteU8(&s, ipmaColor.count); // unsigned int(8) association_count;
-            for (int i = 0; i < ipmaColor.count; ++i) {
+            avifStreamWriteU16(&s, 1);                            // unsigned int(16) item_ID;
+            avifStreamWriteU8(&s, ipmaColor.count);               // unsigned int(8) association_count;
+            for (int i = 0; i < ipmaColor.count; ++i) {           //
                 avifStreamWriteU8(&s, ipmaColor.associations[i]); // bit(1) essential; unsigned int(7) property_index;
             }
 
             if (hasAlpha) {
-                avifStreamWriteU16(&s, 2);              // unsigned int(16) item_ID;
-                avifStreamWriteU8(&s, ipmaAlpha.count); // unsigned int(8) association_count;
-                for (int i = 0; i < ipmaAlpha.count; ++i) {
+                avifStreamWriteU16(&s, 2);                            // unsigned int(16) item_ID;
+                avifStreamWriteU8(&s, ipmaAlpha.count);               // unsigned int(8) association_count;
+                for (int i = 0; i < ipmaAlpha.count; ++i) {           //
                     avifStreamWriteU8(&s, ipmaAlpha.associations[i]); // bit(1) essential; unsigned int(7) property_index;
                 }
             }