Rename variables appropriately in var_based_part.c

- Renamed some of the variables for better readability.
- Defined and used macros for computing block indices.

Change-Id: I34d82d08b8d2cf8727d530e6bbed3b155d09f8a5
diff --git a/av1/encoder/var_based_part.c b/av1/encoder/var_based_part.c
index 9e1d30f..75f619f 100644
--- a/av1/encoder/var_based_part.c
+++ b/av1/encoder/var_based_part.c
@@ -50,49 +50,49 @@
 
 static AOM_INLINE void tree_to_node(void *data, BLOCK_SIZE bsize,
                                     variance_node *node) {
-  int i;
   node->part_variances = NULL;
   switch (bsize) {
     case BLOCK_128X128: {
       VP128x128 *vt = (VP128x128 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i].part_variances.none;
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx].part_variances.none;
       break;
     }
     case BLOCK_64X64: {
       VP64x64 *vt = (VP64x64 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i].part_variances.none;
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx].part_variances.none;
       break;
     }
     case BLOCK_32X32: {
       VP32x32 *vt = (VP32x32 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i].part_variances.none;
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx].part_variances.none;
       break;
     }
     case BLOCK_16X16: {
       VP16x16 *vt = (VP16x16 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i].part_variances.none;
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx].part_variances.none;
       break;
     }
     case BLOCK_8X8: {
       VP8x8 *vt = (VP8x8 *)data;
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++)
-        node->split[i] = &vt->split[i].part_variances.none;
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx].part_variances.none;
       break;
     }
     default: {
       VP4x4 *vt = (VP4x4 *)data;
       assert(bsize == BLOCK_4X4);
       node->part_variances = &vt->part_variances;
-      for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
+      for (int split_idx = 0; split_idx < 4; split_idx++)
+        node->split[split_idx] = &vt->split[split_idx];
       break;
     }
   }
@@ -217,12 +217,14 @@
     if (mi_row + bs_height_check <= tile->mi_row_end &&
         mi_col + bs_width_vert_check <= tile->mi_col_end) {
       BLOCK_SIZE subsize = get_partition_subsize(bsize, PARTITION_VERT);
+      BLOCK_SIZE plane_bsize =
+          get_plane_block_size(subsize, xd->plane[AOM_PLANE_U].subsampling_x,
+                               xd->plane[AOM_PLANE_U].subsampling_y);
       get_variance(&vt.part_variances->vert[0]);
       get_variance(&vt.part_variances->vert[1]);
       if (vt.part_variances->vert[0].variance < threshold &&
           vt.part_variances->vert[1].variance < threshold &&
-          get_plane_block_size(subsize, xd->plane[1].subsampling_x,
-                               xd->plane[1].subsampling_y) < BLOCK_INVALID) {
+          plane_bsize < BLOCK_INVALID) {
         set_block_size(cpi, mi_row, mi_col, subsize);
         set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize);
         return 1;
@@ -232,12 +234,14 @@
     if (mi_col + bs_width_check <= tile->mi_col_end &&
         mi_row + bs_height_horiz_check <= tile->mi_row_end) {
       BLOCK_SIZE subsize = get_partition_subsize(bsize, PARTITION_HORZ);
+      BLOCK_SIZE plane_bsize =
+          get_plane_block_size(subsize, xd->plane[AOM_PLANE_U].subsampling_x,
+                               xd->plane[AOM_PLANE_U].subsampling_y);
       get_variance(&vt.part_variances->horz[0]);
       get_variance(&vt.part_variances->horz[1]);
       if (vt.part_variances->horz[0].variance < threshold &&
           vt.part_variances->horz[1].variance < threshold &&
-          get_plane_block_size(subsize, xd->plane[1].subsampling_x,
-                               xd->plane[1].subsampling_y) < BLOCK_INVALID) {
+          plane_bsize < BLOCK_INVALID) {
         set_block_size(cpi, mi_row, mi_col, subsize);
         set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize);
         return 1;
@@ -251,9 +255,9 @@
 static AOM_INLINE int all_blks_inside(int x16_idx, int y16_idx, int pixels_wide,
                                       int pixels_high) {
   int all_inside = 1;
-  for (int k = 0; k < 4; k++) {
-    all_inside &= ((x16_idx + ((k & 1) << 3)) < pixels_wide);
-    all_inside &= ((y16_idx + ((k >> 1) << 3)) < pixels_high);
+  for (int idx = 0; idx < 4; idx++) {
+    all_inside &= ((x16_idx + GET_BLK_IDX_X(idx, 3)) < pixels_wide);
+    all_inside &= ((y16_idx + GET_BLK_IDX_Y(idx, 3)) < pixels_high);
   }
   return all_inside;
 }
@@ -261,59 +265,63 @@
 #if CONFIG_AV1_HIGHBITDEPTH
 // TODO(yunqingwang): Perform average of four 8x8 blocks similar to lowbd
 static AOM_INLINE void fill_variance_8x8avg_highbd(
-    const uint8_t *s, int sp, const uint8_t *d, int dp, int x16_idx,
-    int y16_idx, VP16x16 *vst, int pixels_wide, int pixels_high) {
-  for (int k = 0; k < 4; k++) {
-    const int x8_idx = x16_idx + ((k & 1) << 3);
-    const int y8_idx = y16_idx + ((k >> 1) << 3);
+    const uint8_t *src_buf, int src_stride, const uint8_t *dst_buf,
+    int dst_stride, int x16_idx, int y16_idx, VP16x16 *vst, int pixels_wide,
+    int pixels_high) {
+  for (int idx = 0; idx < 4; idx++) {
+    const int x8_idx = x16_idx + GET_BLK_IDX_X(idx, 3);
+    const int y8_idx = y16_idx + GET_BLK_IDX_Y(idx, 3);
     unsigned int sse = 0;
     int sum = 0;
     if (x8_idx < pixels_wide && y8_idx < pixels_high) {
-      int s_avg;
-      int d_avg = 128;
-      s_avg = aom_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
-      d_avg = aom_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+      int src_avg;
+      int dst_avg = 128;
+      src_avg = aom_highbd_avg_8x8(src_buf + y8_idx * src_stride + x8_idx,
+                                   src_stride);
+      dst_avg = aom_highbd_avg_8x8(dst_buf + y8_idx * dst_stride + x8_idx,
+                                   dst_stride);
 
-      sum = s_avg - d_avg;
+      sum = src_avg - dst_avg;
       sse = sum * sum;
     }
-    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
+    fill_variance(sse, sum, 0, &vst->split[idx].part_variances.none);
   }
 }
 #endif
 
-static AOM_INLINE void fill_variance_8x8avg_lowbd(const uint8_t *s, int sp,
-                                                  const uint8_t *d, int dp,
-                                                  int x16_idx, int y16_idx,
-                                                  VP16x16 *vst, int pixels_wide,
-                                                  int pixels_high) {
+static AOM_INLINE void fill_variance_8x8avg_lowbd(
+    const uint8_t *src_buf, int src_stride, const uint8_t *dst_buf,
+    int dst_stride, int x16_idx, int y16_idx, VP16x16 *vst, int pixels_wide,
+    int pixels_high) {
   unsigned int sse[4] = { 0 };
   int sum[4] = { 0 };
-  int d_avg[4] = { 128, 128, 128, 128 };
-  int s_avg[4];
+  int dst_avg[4] = { 128, 128, 128, 128 };
+  int src_avg[4];
 
   if (all_blks_inside(x16_idx, y16_idx, pixels_wide, pixels_high)) {
-    aom_avg_8x8_quad(s, sp, x16_idx, y16_idx, s_avg);
-    aom_avg_8x8_quad(d, dp, x16_idx, y16_idx, d_avg);
-    for (int k = 0; k < 4; k++) {
-      sum[k] = s_avg[k] - d_avg[k];
-      sse[k] = sum[k] * sum[k];
+    aom_avg_8x8_quad(src_buf, src_stride, x16_idx, y16_idx, src_avg);
+    aom_avg_8x8_quad(dst_buf, dst_stride, x16_idx, y16_idx, dst_avg);
+    for (int idx = 0; idx < 4; idx++) {
+      sum[idx] = src_avg[idx] - dst_avg[idx];
+      sse[idx] = sum[idx] * sum[idx];
     }
   } else {
-    for (int k = 0; k < 4; k++) {
-      const int x8_idx = x16_idx + ((k & 1) << 3);
-      const int y8_idx = y16_idx + ((k >> 1) << 3);
+    for (int idx = 0; idx < 4; idx++) {
+      const int x8_idx = x16_idx + GET_BLK_IDX_X(idx, 3);
+      const int y8_idx = y16_idx + GET_BLK_IDX_Y(idx, 3);
       if (x8_idx < pixels_wide && y8_idx < pixels_high) {
-        s_avg[k] = aom_avg_8x8(s + y8_idx * sp + x8_idx, sp);
-        d_avg[k] = aom_avg_8x8(d + y8_idx * dp + x8_idx, dp);
-        sum[k] = s_avg[k] - d_avg[k];
-        sse[k] = sum[k] * sum[k];
+        src_avg[idx] =
+            aom_avg_8x8(src_buf + y8_idx * src_stride + x8_idx, src_stride);
+        dst_avg[idx] =
+            aom_avg_8x8(dst_buf + y8_idx * dst_stride + x8_idx, dst_stride);
+        sum[idx] = src_avg[idx] - dst_avg[idx];
+        sse[idx] = sum[idx] * sum[idx];
       }
     }
   }
 
-  for (int k = 0; k < 4; k++) {
-    fill_variance(sse[k], sum[k], 0, &vst->split[k].part_variances.none);
+  for (int idx = 0; idx < 4; idx++) {
+    fill_variance(sse[idx], sum[idx], 0, &vst->split[idx].part_variances.none);
   }
 }
 
@@ -321,51 +329,54 @@
 // at 8x8 sub-block level for a given 16x16 block.
 // The function can be called only when is_key_frame is false since sum is
 // computed between source and reference frames.
-static AOM_INLINE void fill_variance_8x8avg(const uint8_t *s, int sp,
-                                            const uint8_t *d, int dp,
-                                            int x16_idx, int y16_idx,
-                                            VP16x16 *vst, int highbd_flag,
-                                            int pixels_wide, int pixels_high) {
+static AOM_INLINE void fill_variance_8x8avg(
+    const uint8_t *src_buf, int src_stride, const uint8_t *dst_buf,
+    int dst_stride, int x16_idx, int y16_idx, VP16x16 *vst, int highbd_flag,
+    int pixels_wide, int pixels_high) {
 #if CONFIG_AV1_HIGHBITDEPTH
   if (highbd_flag) {
-    fill_variance_8x8avg_highbd(s, sp, d, dp, x16_idx, y16_idx, vst,
-                                pixels_wide, pixels_high);
+    fill_variance_8x8avg_highbd(src_buf, src_stride, dst_buf, dst_stride,
+                                x16_idx, y16_idx, vst, pixels_wide,
+                                pixels_high);
     return;
   }
 #else
   (void)highbd_flag;
 #endif  // CONFIG_AV1_HIGHBITDEPTH
-  fill_variance_8x8avg_lowbd(s, sp, d, dp, x16_idx, y16_idx, vst, pixels_wide,
-                             pixels_high);
+  fill_variance_8x8avg_lowbd(src_buf, src_stride, dst_buf, dst_stride, x16_idx,
+                             y16_idx, vst, pixels_wide, pixels_high);
 }
 
-static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
-                              int dp, int x16_idx, int y16_idx,
+static int compute_minmax_8x8(const uint8_t *src_buf, int src_stride,
+                              const uint8_t *dst_buf, int dst_stride,
+                              int x16_idx, int y16_idx,
 #if CONFIG_AV1_HIGHBITDEPTH
                               int highbd_flag,
 #endif
                               int pixels_wide, int pixels_high) {
-  int k;
   int minmax_max = 0;
   int minmax_min = 255;
   // Loop over the 4 8x8 subblocks.
-  for (k = 0; k < 4; k++) {
-    int x8_idx = x16_idx + ((k & 1) << 3);
-    int y8_idx = y16_idx + ((k >> 1) << 3);
+  for (int idx = 0; idx < 4; idx++) {
+    int x8_idx = x16_idx + GET_BLK_IDX_X(idx, 3);
+    int y8_idx = y16_idx + GET_BLK_IDX_Y(idx, 3);
     int min = 0;
     int max = 0;
     if (x8_idx < pixels_wide && y8_idx < pixels_high) {
 #if CONFIG_AV1_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
-        aom_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
-                              d + y8_idx * dp + x8_idx, dp, &min, &max);
+        aom_highbd_minmax_8x8(
+            src_buf + y8_idx * src_stride + x8_idx, src_stride,
+            dst_buf + y8_idx * dst_stride + x8_idx, dst_stride, &min, &max);
       } else {
-        aom_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
-                       dp, &min, &max);
+        aom_minmax_8x8(src_buf + y8_idx * src_stride + x8_idx, src_stride,
+                       dst_buf + y8_idx * dst_stride + x8_idx, dst_stride, &min,
+                       &max);
       }
 #else
-      aom_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
-                     &min, &max);
+      aom_minmax_8x8(src_buf + y8_idx * src_stride + x8_idx, src_stride,
+                     dst_buf + y8_idx * dst_stride + x8_idx, dst_stride, &min,
+                     &max);
 #endif
       if ((max - min) > minmax_max) minmax_max = (max - min);
       if ((max - min) < minmax_min) minmax_min = (max - min);
@@ -377,37 +388,39 @@
 // Function to compute average and variance of 4x4 sub-block.
 // The function can be called only when is_key_frame is true since sum is
 // computed using source frame only.
-static AOM_INLINE void fill_variance_4x4avg(const uint8_t *s, int sp,
-                                            int x8_idx, int y8_idx, VP8x8 *vst,
+static AOM_INLINE void fill_variance_4x4avg(const uint8_t *src_buf,
+                                            int src_stride, int x8_idx,
+                                            int y8_idx, VP8x8 *vst,
 #if CONFIG_AV1_HIGHBITDEPTH
                                             int highbd_flag,
 #endif
                                             int pixels_wide, int pixels_high,
                                             int border_offset_4x4) {
-  int k;
-  for (k = 0; k < 4; k++) {
-    int x4_idx = x8_idx + ((k & 1) << 2);
-    int y4_idx = y8_idx + ((k >> 1) << 2);
+  for (int idx = 0; idx < 4; idx++) {
+    int x4_idx = x8_idx + GET_BLK_IDX_X(idx, 2);
+    int y4_idx = y8_idx + GET_BLK_IDX_Y(idx, 2);
     unsigned int sse = 0;
     int sum = 0;
     if (x4_idx < pixels_wide - border_offset_4x4 &&
         y4_idx < pixels_high - border_offset_4x4) {
-      int s_avg;
-      int d_avg = 128;
+      int src_avg;
+      int dst_avg = 128;
 #if CONFIG_AV1_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
-        s_avg = aom_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+        src_avg = aom_highbd_avg_4x4(src_buf + y4_idx * src_stride + x4_idx,
+                                     src_stride);
       } else {
-        s_avg = aom_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+        src_avg =
+            aom_avg_4x4(src_buf + y4_idx * src_stride + x4_idx, src_stride);
       }
 #else
-      s_avg = aom_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+      src_avg = aom_avg_4x4(src_buf + y4_idx * src_stride + x4_idx, src_stride);
 #endif
 
-      sum = s_avg - d_avg;
+      sum = src_avg - dst_avg;
       sse = sum * sum;
     }
-    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
+    fill_variance(sse, sum, 0, &vst->split[idx].part_variances.none);
   }
 }
 
@@ -444,7 +457,7 @@
 }
 
 static AOM_INLINE void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[],
-                                          int q, int content_lowsumdiff,
+                                          int qindex, int content_lowsumdiff,
                                           int source_sad_nonrd,
                                           int source_sad_rd, int segment_id,
                                           uint64_t blk_sad,
@@ -452,7 +465,7 @@
   AV1_COMMON *const cm = &cpi->common;
   const int is_key_frame = frame_is_intra_only(cm);
   const int threshold_multiplier = is_key_frame ? 120 : 1;
-  const int ac_q = av1_ac_quant_QTX(q, 0, cm->seq_params->bit_depth);
+  const int ac_q = av1_ac_quant_QTX(qindex, 0, cm->seq_params->bit_depth);
   int64_t threshold_base = (int64_t)(threshold_multiplier * ac_q);
   const int current_qindex = cm->quant_params.base_qindex;
   const int threshold_left_shift = cpi->sf.rt_sf.var_part_split_threshold_shift;
@@ -589,8 +602,8 @@
       weight =
           1.0 - (current_qindex - QINDEX_LARGE_BLOCK_THR + win) / (2 * win);
     if (num_pixels > RESOLUTION_480P) {
-      for (int i = 0; i < 4; i++) {
-        thresholds[i] <<= 1;
+      for (int idx = 0; idx < 4; idx++) {
+        thresholds[idx] <<= 1;
       }
     }
     if (num_pixels <= RESOLUTION_288P) {
@@ -659,42 +672,43 @@
     if ((vt->part_variances).none.variance < (thresholds[0] >> 1))
       part_info->variance_low[0] = 1;
   } else if (xd->mi[0]->bsize == BLOCK_64X32) {
-    for (int i = 0; i < 2; i++) {
-      if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
-        part_info->variance_low[i + 1] = 1;
+    for (int part_idx = 0; part_idx < 2; part_idx++) {
+      if (vt->part_variances.horz[part_idx].variance < (thresholds[0] >> 2))
+        part_info->variance_low[part_idx + 1] = 1;
     }
   } else if (xd->mi[0]->bsize == BLOCK_32X64) {
-    for (int i = 0; i < 2; i++) {
-      if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
-        part_info->variance_low[i + 3] = 1;
+    for (int part_idx = 0; part_idx < 2; part_idx++) {
+      if (vt->part_variances.vert[part_idx].variance < (thresholds[0] >> 2))
+        part_info->variance_low[part_idx + 3] = 1;
     }
   } else {
     static const int idx[4][2] = { { 0, 0 }, { 0, 8 }, { 8, 0 }, { 8, 8 } };
-    for (int i = 0; i < 4; i++) {
-      const int idx_str =
-          mi_params->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1];
+    for (int lvl1_idx = 0; lvl1_idx < 4; lvl1_idx++) {
+      const int idx_str = mi_params->mi_stride * (mi_row + idx[lvl1_idx][0]) +
+                          mi_col + idx[lvl1_idx][1];
       MB_MODE_INFO **this_mi = mi_params->mi_grid_base + idx_str;
 
-      if (mi_params->mi_cols <= mi_col + idx[i][1] ||
-          mi_params->mi_rows <= mi_row + idx[i][0])
+      if (mi_params->mi_cols <= mi_col + idx[lvl1_idx][1] ||
+          mi_params->mi_rows <= mi_row + idx[lvl1_idx][0])
         continue;
 
       if (*this_mi == NULL) continue;
 
       if ((*this_mi)->bsize == BLOCK_32X32) {
         int64_t threshold_32x32 = (5 * thresholds[1]) >> 3;
-        if (vt->split[i].part_variances.none.variance < threshold_32x32)
-          part_info->variance_low[i + 5] = 1;
+        if (vt->split[lvl1_idx].part_variances.none.variance < threshold_32x32)
+          part_info->variance_low[lvl1_idx + 5] = 1;
       } else {
         // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
         // inside.
         if ((*this_mi)->bsize == BLOCK_16X16 ||
             (*this_mi)->bsize == BLOCK_32X16 ||
             (*this_mi)->bsize == BLOCK_16X32) {
-          for (int j = 0; j < 4; j++) {
-            if (vt->split[i].split[j].part_variances.none.variance <
-                (thresholds[2] >> 8))
-              part_info->variance_low[(i << 2) + j + 9] = 1;
+          for (int lvl2_idx = 0; lvl2_idx < 4; lvl2_idx++) {
+            if (vt->split[lvl1_idx]
+                    .split[lvl2_idx]
+                    .part_variances.none.variance < (thresholds[2] >> 8))
+              part_info->variance_low[(lvl1_idx << 2) + lvl2_idx + 9] = 1;
           }
         }
       }
@@ -710,68 +724,74 @@
     if (vt->part_variances.none.variance < (thresholds[0] >> 1))
       part_info->variance_low[0] = 1;
   } else if (xd->mi[0]->bsize == BLOCK_128X64) {
-    for (int i = 0; i < 2; i++) {
-      if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
-        part_info->variance_low[i + 1] = 1;
+    for (int part_idx = 0; part_idx < 2; part_idx++) {
+      if (vt->part_variances.horz[part_idx].variance < (thresholds[0] >> 2))
+        part_info->variance_low[part_idx + 1] = 1;
     }
   } else if (xd->mi[0]->bsize == BLOCK_64X128) {
-    for (int i = 0; i < 2; i++) {
-      if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
-        part_info->variance_low[i + 3] = 1;
+    for (int part_idx = 0; part_idx < 2; part_idx++) {
+      if (vt->part_variances.vert[part_idx].variance < (thresholds[0] >> 2))
+        part_info->variance_low[part_idx + 3] = 1;
     }
   } else {
     static const int idx64[4][2] = {
       { 0, 0 }, { 0, 16 }, { 16, 0 }, { 16, 16 }
     };
     static const int idx32[4][2] = { { 0, 0 }, { 0, 8 }, { 8, 0 }, { 8, 8 } };
-    for (int i = 0; i < 4; i++) {
-      const int idx_str =
-          mi_params->mi_stride * (mi_row + idx64[i][0]) + mi_col + idx64[i][1];
+    for (int lvl1_idx = 0; lvl1_idx < 4; lvl1_idx++) {
+      const int idx_str = mi_params->mi_stride * (mi_row + idx64[lvl1_idx][0]) +
+                          mi_col + idx64[lvl1_idx][1];
       MB_MODE_INFO **mi_64 = mi_params->mi_grid_base + idx_str;
       if (*mi_64 == NULL) continue;
-      if (mi_params->mi_cols <= mi_col + idx64[i][1] ||
-          mi_params->mi_rows <= mi_row + idx64[i][0])
+      if (mi_params->mi_cols <= mi_col + idx64[lvl1_idx][1] ||
+          mi_params->mi_rows <= mi_row + idx64[lvl1_idx][0])
         continue;
       const int64_t threshold_64x64 = (5 * thresholds[1]) >> 3;
       if ((*mi_64)->bsize == BLOCK_64X64) {
-        if (vt->split[i].part_variances.none.variance < threshold_64x64)
-          part_info->variance_low[5 + i] = 1;
+        if (vt->split[lvl1_idx].part_variances.none.variance < threshold_64x64)
+          part_info->variance_low[5 + lvl1_idx] = 1;
       } else if ((*mi_64)->bsize == BLOCK_64X32) {
-        for (int j = 0; j < 2; j++)
-          if (vt->split[i].part_variances.horz[j].variance <
+        for (int part_idx = 0; part_idx < 2; part_idx++)
+          if (vt->split[lvl1_idx].part_variances.horz[part_idx].variance <
               (threshold_64x64 >> 1))
-            part_info->variance_low[9 + (i << 1) + j] = 1;
+            part_info->variance_low[9 + (lvl1_idx << 1) + part_idx] = 1;
       } else if ((*mi_64)->bsize == BLOCK_32X64) {
-        for (int j = 0; j < 2; j++)
-          if (vt->split[i].part_variances.vert[j].variance <
+        for (int part_idx = 0; part_idx < 2; part_idx++)
+          if (vt->split[lvl1_idx].part_variances.vert[part_idx].variance <
               (threshold_64x64 >> 1))
-            part_info->variance_low[17 + (i << 1) + j] = 1;
+            part_info->variance_low[17 + (lvl1_idx << 1) + part_idx] = 1;
       } else {
-        for (int k = 0; k < 4; k++) {
-          const int idx_str1 = mi_params->mi_stride * idx32[k][0] + idx32[k][1];
+        for (int lvl2_idx = 0; lvl2_idx < 4; lvl2_idx++) {
+          const int idx_str1 =
+              mi_params->mi_stride * idx32[lvl2_idx][0] + idx32[lvl2_idx][1];
           MB_MODE_INFO **mi_32 = mi_params->mi_grid_base + idx_str + idx_str1;
           if (*mi_32 == NULL) continue;
 
-          if (mi_params->mi_cols <= mi_col + idx64[i][1] + idx32[k][1] ||
-              mi_params->mi_rows <= mi_row + idx64[i][0] + idx32[k][0])
+          if (mi_params->mi_cols <=
+                  mi_col + idx64[lvl1_idx][1] + idx32[lvl2_idx][1] ||
+              mi_params->mi_rows <=
+                  mi_row + idx64[lvl1_idx][0] + idx32[lvl2_idx][0])
             continue;
           const int64_t threshold_32x32 = (5 * thresholds[2]) >> 3;
           if ((*mi_32)->bsize == BLOCK_32X32) {
-            if (vt->split[i].split[k].part_variances.none.variance <
-                threshold_32x32)
-              part_info->variance_low[25 + (i << 2) + k] = 1;
+            if (vt->split[lvl1_idx]
+                    .split[lvl2_idx]
+                    .part_variances.none.variance < threshold_32x32)
+              part_info->variance_low[25 + (lvl1_idx << 2) + lvl2_idx] = 1;
           } else {
             // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
             // inside.
             if ((*mi_32)->bsize == BLOCK_16X16 ||
                 (*mi_32)->bsize == BLOCK_32X16 ||
                 (*mi_32)->bsize == BLOCK_16X32) {
-              for (int j = 0; j < 4; j++) {
-                if (vt->split[i]
-                        .split[k]
-                        .split[j]
-                        .part_variances.none.variance < (thresholds[3] >> 8))
-                  part_info->variance_low[41 + (i << 4) + (k << 2) + j] = 1;
+              for (int lvl3_idx = 0; lvl3_idx < 4; lvl3_idx++) {
+                VPartVar *none_var = &vt->split[lvl1_idx]
+                                          .split[lvl2_idx]
+                                          .split[lvl3_idx]
+                                          .part_variances.none;
+                if (none_var->variance < (thresholds[3] >> 8))
+                  part_info->variance_low[41 + (lvl1_idx << 4) +
+                                          (lvl2_idx << 2) + lvl3_idx] = 1;
               }
             }
           }
@@ -926,16 +946,16 @@
   return force_skip_low_temp_var;
 }
 
-void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q,
+void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int qindex,
                                            int content_lowsumdiff) {
   SPEED_FEATURES *const sf = &cpi->sf;
   if (sf->part_sf.partition_search_type != VAR_BASED_PARTITION) {
     return;
   } else {
-    set_vbp_thresholds(cpi, cpi->vbp_info.thresholds, q, content_lowsumdiff, 0,
-                       0, 0, 0, 0);
+    set_vbp_thresholds(cpi, cpi->vbp_info.thresholds, qindex,
+                       content_lowsumdiff, 0, 0, 0, 0, 0);
     // The threshold below is not changed locally.
-    cpi->vbp_info.threshold_minmax = 15 + (q >> 3);
+    cpi->vbp_info.threshold_minmax = 15 + (qindex >> 3);
   }
 }
 
@@ -943,7 +963,6 @@
                                     BLOCK_SIZE bsize, unsigned int y_sad,
                                     unsigned int y_sad_g, bool is_key_frame,
                                     bool zero_motion, unsigned int *uv_sad) {
-  int i;
   MACROBLOCKD *xd = &x->e_mbd;
   int shift = 3;
   if (is_key_frame || cpi->oxcf.tool_cfg.enable_monochrome) return;
@@ -961,9 +980,9 @@
   struct buf_2d dst;
   unsigned int uv_sad_g = 0;
 
-  for (i = 1; i <= 2; ++i) {
-    struct macroblock_plane *p = &x->plane[i];
-    struct macroblockd_plane *pd = &xd->plane[i];
+  for (int plane = AOM_PLANE_U; plane < MAX_MB_PLANE; ++plane) {
+    struct macroblock_plane *p = &x->plane[plane];
+    struct macroblockd_plane *pd = &xd->plane[plane];
     const BLOCK_SIZE bs =
         get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y);
 
@@ -971,53 +990,53 @@
       // For last:
       if (zero_motion) {
         if (mi->ref_frame[0] == LAST_FRAME) {
-          uv_sad[i - 1] = cpi->ppi->fn_ptr[bs].sdf(
+          uv_sad[plane - 1] = cpi->ppi->fn_ptr[bs].sdf(
               p->src.buf, p->src.stride, pd->pre[0].buf, pd->pre[0].stride);
         } else {
-          uint8_t *src = (i == 1) ? yv12->u_buffer : yv12->v_buffer;
+          uint8_t *src = (plane == 1) ? yv12->u_buffer : yv12->v_buffer;
           setup_pred_plane(&dst, xd->mi[0]->bsize, src, yv12->uv_crop_width,
                            yv12->uv_crop_height, yv12->uv_stride, xd->mi_row,
-                           xd->mi_col, sf, xd->plane[i].subsampling_x,
-                           xd->plane[i].subsampling_y);
+                           xd->mi_col, sf, xd->plane[plane].subsampling_x,
+                           xd->plane[plane].subsampling_y);
 
-          uv_sad[i - 1] = cpi->ppi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
-                                                   dst.buf, dst.stride);
+          uv_sad[plane - 1] = cpi->ppi->fn_ptr[bs].sdf(
+              p->src.buf, p->src.stride, dst.buf, dst.stride);
         }
       } else {
-        uv_sad[i - 1] = cpi->ppi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
-                                                 pd->dst.buf, pd->dst.stride);
+        uv_sad[plane - 1] = cpi->ppi->fn_ptr[bs].sdf(
+            p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride);
       }
 
       // For golden:
       if (y_sad_g != UINT_MAX) {
-        uint8_t *src = (i == 1) ? yv12_g->u_buffer : yv12_g->v_buffer;
+        uint8_t *src = (plane == 1) ? yv12_g->u_buffer : yv12_g->v_buffer;
         setup_pred_plane(&dst, xd->mi[0]->bsize, src, yv12_g->uv_crop_width,
                          yv12_g->uv_crop_height, yv12_g->uv_stride, xd->mi_row,
-                         xd->mi_col, sf, xd->plane[i].subsampling_x,
-                         xd->plane[i].subsampling_y);
+                         xd->mi_col, sf, xd->plane[plane].subsampling_x,
+                         xd->plane[plane].subsampling_y);
         uv_sad_g = cpi->ppi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, dst.buf,
                                             dst.stride);
       }
     }
 
-    if (uv_sad[i - 1] > (y_sad >> 1))
-      x->color_sensitivity_sb[i - 1] = 1;
-    else if (uv_sad[i - 1] < (y_sad >> shift))
-      x->color_sensitivity_sb[i - 1] = 0;
+    if (uv_sad[plane - 1] > (y_sad >> 1))
+      x->color_sensitivity_sb[COLOR_SENS_IDX(plane)] = 1;
+    else if (uv_sad[plane - 1] < (y_sad >> shift))
+      x->color_sensitivity_sb[COLOR_SENS_IDX(plane)] = 0;
     // Borderline case: to be refined at coding block level in nonrd_pickmode,
     // for coding block size < sb_size.
     else
-      x->color_sensitivity_sb[i - 1] = 2;
+      x->color_sensitivity_sb[COLOR_SENS_IDX(plane)] = 2;
 
-    x->color_sensitivity_sb_g[i - 1] = uv_sad_g > y_sad_g / 6;
+    x->color_sensitivity_sb_g[COLOR_SENS_IDX(plane)] = uv_sad_g > y_sad_g / 6;
   }
 }
 
 static void fill_variance_tree_leaves(
     AV1_COMP *cpi, MACROBLOCK *x, VP128x128 *vt, PART_EVAL_STATUS *force_split,
     int avg_16x16[][4], int maxvar_16x16[][4], int minvar_16x16[][4],
-    int *variance4x4downsample, int64_t *thresholds, const uint8_t *src,
-    int src_stride, const uint8_t *dst, int dst_stride, bool is_key_frame,
+    int *variance4x4downsample, int64_t *thresholds, const uint8_t *src_buf,
+    int src_stride, const uint8_t *dst_buf, int dst_stride, bool is_key_frame,
     const bool is_small_sb) {
   MACROBLOCKD *xd = &x->e_mbd;
   const int num_64x64_blocks = is_small_sb ? 1 : 4;
@@ -1043,30 +1062,30 @@
   // Temporal filtering is never done on key frames.
   if (!is_key_frame && temporal_denoising) border_offset_4x4 = 4;
   for (int blk64_idx = 0; blk64_idx < num_64x64_blocks; blk64_idx++) {
-    const int x64_idx = ((blk64_idx & 1) << 6);
-    const int y64_idx = ((blk64_idx >> 1) << 6);
+    const int x64_idx = GET_BLK_IDX_X(blk64_idx, 6);
+    const int y64_idx = GET_BLK_IDX_Y(blk64_idx, 6);
     const int blk64_scale_idx = blk64_idx << 2;
     force_split[blk64_idx + 1] = PART_EVAL_ALL;
 
     for (int lvl1_idx = 0; lvl1_idx < 4; lvl1_idx++) {
-      const int x32_idx = x64_idx + ((lvl1_idx & 1) << 5);
-      const int y32_idx = y64_idx + ((lvl1_idx >> 1) << 5);
+      const int x32_idx = x64_idx + GET_BLK_IDX_X(lvl1_idx, 5);
+      const int y32_idx = y64_idx + GET_BLK_IDX_Y(lvl1_idx, 5);
       const int lvl1_scale_idx = (blk64_scale_idx + lvl1_idx) << 2;
       force_split[5 + blk64_scale_idx + lvl1_idx] = PART_EVAL_ALL;
       avg_16x16[blk64_idx][lvl1_idx] = 0;
       maxvar_16x16[blk64_idx][lvl1_idx] = 0;
       minvar_16x16[blk64_idx][lvl1_idx] = INT_MAX;
       for (int lvl2_idx = 0; lvl2_idx < 4; lvl2_idx++) {
-        const int x16_idx = x32_idx + ((lvl2_idx & 1) << 4);
-        const int y16_idx = y32_idx + ((lvl2_idx >> 1) << 4);
+        const int x16_idx = x32_idx + GET_BLK_IDX_X(lvl2_idx, 4);
+        const int y16_idx = y32_idx + GET_BLK_IDX_Y(lvl2_idx, 4);
         const int split_index = 21 + lvl1_scale_idx + lvl2_idx;
         VP16x16 *vst = &vt->split[blk64_idx].split[lvl1_idx].split[lvl2_idx];
         force_split[split_index] = PART_EVAL_ALL;
         variance4x4downsample[lvl1_scale_idx + lvl2_idx] = 0;
         if (!is_key_frame) {
-          fill_variance_8x8avg(src, src_stride, dst, dst_stride, x16_idx,
-                               y16_idx, vst, is_cur_buf_hbd(xd), pixels_wide,
-                               pixels_high);
+          fill_variance_8x8avg(src_buf, src_stride, dst_buf, dst_stride,
+                               x16_idx, y16_idx, vst, is_cur_buf_hbd(xd),
+                               pixels_wide, pixels_high);
 
           fill_variance_tree(vst, BLOCK_16X16);
           VPartVar *none_var = &vt->split[blk64_idx]
@@ -1093,8 +1112,8 @@
             // We have some nominal amount of 16x16 variance (based on average),
             // compute the minmax over the 8x8 sub-blocks, and if above
             // threshold, force split to 8x8 block for this 16x16 block.
-            int minmax = compute_minmax_8x8(src, src_stride, dst, dst_stride,
-                                            x16_idx, y16_idx,
+            int minmax = compute_minmax_8x8(src_buf, src_stride, dst_buf,
+                                            dst_stride, x16_idx, y16_idx,
 #if CONFIG_AV1_HIGHBITDEPTH
                                             xd->cur_buf->flags,
 #endif
@@ -1113,10 +1132,10 @@
           // Go down to 4x4 down-sampling for variance.
           variance4x4downsample[lvl1_scale_idx + lvl2_idx] = 1;
           for (int lvl3_idx = 0; lvl3_idx < 4; lvl3_idx++) {
-            int x8_idx = x16_idx + ((lvl3_idx & 1) << 3);
-            int y8_idx = y16_idx + ((lvl3_idx >> 1) << 3);
+            int x8_idx = x16_idx + GET_BLK_IDX_X(lvl3_idx, 3);
+            int y8_idx = y16_idx + GET_BLK_IDX_Y(lvl3_idx, 3);
             VP8x8 *vst2 = &vst->split[lvl3_idx];
-            fill_variance_4x4avg(src, src_stride, x8_idx, y8_idx, vst2,
+            fill_variance_4x4avg(src_buf, src_stride, x8_idx, y8_idx, vst2,
 #if CONFIG_AV1_HIGHBITDEPTH
                                  xd->cur_buf->flags,
 #endif
@@ -1160,8 +1179,9 @@
       av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                            get_ref_scale_factors(cm, GOLDEN_FRAME), num_planes);
       *y_sad_g = cpi->ppi->fn_ptr[bsize].sdf(
-          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
-          xd->plane[0].pre[0].stride);
+          x->plane[AOM_PLANE_Y].src.buf, x->plane[AOM_PLANE_Y].src.stride,
+          xd->plane[AOM_PLANE_Y].pre[0].buf,
+          xd->plane[AOM_PLANE_Y].pre[0].stride);
     }
   }
 
@@ -1175,8 +1195,9 @@
       av1_setup_pre_planes(xd, 0, yv12_alt, mi_row, mi_col,
                            get_ref_scale_factors(cm, ALTREF_FRAME), num_planes);
       *y_sad_alt = cpi->ppi->fn_ptr[bsize].sdf(
-          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
-          xd->plane[0].pre[0].stride);
+          x->plane[AOM_PLANE_Y].src.buf, x->plane[AOM_PLANE_Y].src.stride,
+          xd->plane[AOM_PLANE_Y].pre[0].buf,
+          xd->plane[AOM_PLANE_Y].pre[0].stride);
     }
   }
 
@@ -1197,8 +1218,9 @@
     }
     if (*y_sad == UINT_MAX) {
       *y_sad = cpi->ppi->fn_ptr[bsize].sdf(
-          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
-          xd->plane[0].pre[0].stride);
+          x->plane[AOM_PLANE_Y].src.buf, x->plane[AOM_PLANE_Y].src.stride,
+          xd->plane[AOM_PLANE_Y].pre[0].buf,
+          xd->plane[AOM_PLANE_Y].pre[0].stride);
     }
     *y_sad_last = *y_sad;
   }
@@ -1241,9 +1263,10 @@
 static AOM_INLINE PART_EVAL_STATUS get_part_eval_based_on_sub_blk_var(
     VP16x16 *var_16x16_info, int64_t threshold16) {
   int max_8x8_var = 0, min_8x8_var = INT_MAX;
-  for (int k = 0; k < 4; k++) {
-    get_variance(&var_16x16_info->split[k].part_variances.none);
-    int this_8x8_var = var_16x16_info->split[k].part_variances.none.variance;
+  for (int split_idx = 0; split_idx < 4; split_idx++) {
+    get_variance(&var_16x16_info->split[split_idx].part_variances.none);
+    int this_8x8_var =
+        var_16x16_info->split[split_idx].part_variances.none.variance;
     max_8x8_var = AOMMAX(this_8x8_var, max_8x8_var);
     min_8x8_var = AOMMIN(this_8x8_var, min_8x8_var);
   }
@@ -1294,7 +1317,7 @@
   const uint8_t *src_buf;
   const uint8_t *dst_buf;
   int dst_stride;
-  unsigned int uv_sad[2];
+  unsigned int uv_sad[MAX_MB_PLANE - 1];
   NOISE_LEVEL noise_level = kLow;
   bool is_zero_motion = true;
 
@@ -1359,8 +1382,8 @@
   // For non keyframes, disable 4x4 average for low resolution when speed = 8
   threshold_4x4avg = INT64_MAX;
 
-  src_buf = x->plane[0].src.buf;
-  int src_stride = x->plane[0].src.stride;
+  src_buf = x->plane[AOM_PLANE_Y].src.buf;
+  int src_stride = x->plane[AOM_PLANE_Y].src.stride;
 
   // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
   // 5-20 for the 16x16 blocks.
@@ -1390,12 +1413,12 @@
     MB_MODE_INFO *mi = xd->mi[0];
     // Use reference SB directly for zero mv.
     if (mi->mv[0].as_int != 0) {
-      dst_buf = xd->plane[0].dst.buf;
-      dst_stride = xd->plane[0].dst.stride;
+      dst_buf = xd->plane[AOM_PLANE_Y].dst.buf;
+      dst_stride = xd->plane[AOM_PLANE_Y].dst.stride;
       is_zero_motion = false;
     } else {
-      dst_buf = xd->plane[0].pre[0].buf;
-      dst_stride = xd->plane[0].pre[0].stride;
+      dst_buf = xd->plane[AOM_PLANE_Y].pre[0].buf;
+      dst_stride = xd->plane[AOM_PLANE_Y].pre[0].stride;
     }
   } else {
     dst_buf = AV1_VAR_OFFS;
@@ -1403,8 +1426,7 @@
   }
 
   // check and set the color sensitivity of sb.
-  uv_sad[0] = 0;
-  uv_sad[1] = 0;
+  av1_zero(uv_sad);
   chroma_check(cpi, x, bsize, y_sad_last, y_sad_g, is_key_frame, is_zero_motion,
                uv_sad);
 
@@ -1432,8 +1454,9 @@
         CALC_CHROMA_THRESH_FOR_ZEROMV_SKIP(thresh_exit_part_y);
     if (mi_col + block_width <= tile->mi_col_end &&
         mi_row + block_height <= tile->mi_row_end &&
-        y_sad < thresh_exit_part_y && uv_sad[0] < thresh_exit_part_uv &&
-        uv_sad[1] < thresh_exit_part_uv) {
+        y_sad < thresh_exit_part_y &&
+        uv_sad[AOM_PLANE_U - 1] < thresh_exit_part_uv &&
+        uv_sad[AOM_PLANE_V - 1] < thresh_exit_part_uv) {
       set_block_size(cpi, mi_row, mi_col, bsize);
       x->force_zeromv_skip_for_sb = 1;
       if (vt2) aom_free(vt2);
@@ -1572,8 +1595,8 @@
       !set_vt_partitioning(cpi, xd, tile, vt, BLOCK_128X128, mi_row, mi_col,
                            thresholds[0], BLOCK_16X16, force_split[0])) {
     for (int blk64_idx = 0; blk64_idx < num_64x64_blocks; ++blk64_idx) {
-      const int x64_idx = ((blk64_idx & 1) << 4);
-      const int y64_idx = ((blk64_idx >> 1) << 4);
+      const int x64_idx = GET_BLK_IDX_X(blk64_idx, 4);
+      const int y64_idx = GET_BLK_IDX_Y(blk64_idx, 4);
       const int blk64_scale_idx = blk64_idx << 2;
 
       // Now go through the entire structure, splitting every block size until
@@ -1583,8 +1606,8 @@
                               BLOCK_16X16, force_split[1 + blk64_idx]))
         continue;
       for (int lvl1_idx = 0; lvl1_idx < 4; ++lvl1_idx) {
-        const int x32_idx = ((lvl1_idx & 1) << 3);
-        const int y32_idx = ((lvl1_idx >> 1) << 3);
+        const int x32_idx = GET_BLK_IDX_X(lvl1_idx, 3);
+        const int y32_idx = GET_BLK_IDX_Y(lvl1_idx, 3);
         const int lvl1_scale_idx = (blk64_scale_idx + lvl1_idx) << 2;
         if (set_vt_partitioning(
                 cpi, xd, tile, &vt->split[blk64_idx].split[lvl1_idx],
@@ -1593,8 +1616,8 @@
                 force_split[5 + blk64_scale_idx + lvl1_idx]))
           continue;
         for (int lvl2_idx = 0; lvl2_idx < 4; ++lvl2_idx) {
-          const int x16_idx = ((lvl2_idx & 1) << 2);
-          const int y16_idx = ((lvl2_idx >> 1) << 2);
+          const int x16_idx = GET_BLK_IDX_X(lvl2_idx, 2);
+          const int y16_idx = GET_BLK_IDX_Y(lvl2_idx, 2);
           const int split_index = 21 + lvl1_scale_idx + lvl2_idx;
           // For inter frames: if variance4x4downsample[] == 1 for this
           // 16x16 block, then the variance is based on 4x4 down-sampling,
@@ -1611,8 +1634,8 @@
                                   force_split[split_index]))
             continue;
           for (int lvl3_idx = 0; lvl3_idx < 4; ++lvl3_idx) {
-            const int x8_idx = (lvl3_idx & 1) << 1;
-            const int y8_idx = (lvl3_idx >> 1) << 1;
+            const int x8_idx = GET_BLK_IDX_X(lvl3_idx, 1);
+            const int y8_idx = GET_BLK_IDX_Y(lvl3_idx, 1);
             set_block_size(cpi, (mi_row + y64_idx + y32_idx + y16_idx + y8_idx),
                            (mi_col + x64_idx + x32_idx + x16_idx + x8_idx),
                            BLOCK_8X8);
diff --git a/av1/encoder/var_based_part.h b/av1/encoder/var_based_part.h
index 7febc0e..f912458 100644
--- a/av1/encoder/var_based_part.h
+++ b/av1/encoder/var_based_part.h
@@ -20,6 +20,10 @@
 
 #include "av1/encoder/encoder.h"
 
+// Calculate the x and y block indices from the split index and shift level
+#define GET_BLK_IDX_X(idx, level) (((idx) & (0x01)) << (level))
+#define GET_BLK_IDX_Y(idx, level) (((idx) >> (0x01)) << (level))
+
 #ifdef __cplusplus
 extern "C" {
 #endif