Remove VAR_BASED_PARTITION.

Delete the variance-based partition search path and its now-unused
helpers: choose_partitioning() and the VAR_TREE machinery in
encodeframe.c, the encoder variance tree sources
(variance_tree.c/variance_tree.h), and the aom_avg_4x4/aom_avg_8x8
kernels (C, SSE2, NEON, MSA) together with their high-bitdepth
prototypes.

BUG=aomedia:526

Change-Id: I5d9b86a36f412ded2d6f20e198d2f4de4f97aaeb
diff --git a/aom_dsp/aom_dsp.cmake b/aom_dsp/aom_dsp.cmake
index f1fd021..c42bd73 100644
--- a/aom_dsp/aom_dsp.cmake
+++ b/aom_dsp/aom_dsp.cmake
@@ -327,7 +327,6 @@
         "${AOM_ROOT}/aom_dsp/x86/quantize_avx_x86_64.asm")
 
     set(AOM_DSP_ENCODER_INTRIN_MSA
-        "${AOM_ROOT}/aom_dsp/mips/avg_msa.c"
         "${AOM_ROOT}/aom_dsp/mips/sad_msa.c"
         "${AOM_ROOT}/aom_dsp/mips/subtract_msa.c"
         "${AOM_ROOT}/aom_dsp/mips/variance_msa.c"
diff --git a/aom_dsp/aom_dsp.mk b/aom_dsp/aom_dsp.mk
index 3be8143..6e2d563 100644
--- a/aom_dsp/aom_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -302,7 +302,6 @@
 DSP_SRCS-yes           += avg.c
 DSP_SRCS-$(HAVE_SSE2)  += x86/avg_intrin_sse2.c
 DSP_SRCS-$(HAVE_NEON)  += arm/avg_neon.c
-DSP_SRCS-$(HAVE_MSA)   += mips/avg_msa.c
 DSP_SRCS-$(HAVE_NEON)  += arm/hadamard_neon.c
 ifeq ($(ARCH_X86_64),yes)
 DSP_SRCS-$(HAVE_SSSE3) += x86/avg_ssse3_x86_64.asm
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 9d42d69..8047cbc 100755
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -617,13 +617,7 @@
  #
  # Avg
  #
-  add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
-  specialize qw/aom_avg_8x8 sse2 neon msa/;
-  add_proto qw/unsigned int aom_avg_4x4/, "const uint8_t *, int p";
-  specialize qw/aom_avg_4x4 sse2 neon msa/;
   if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
-    add_proto qw/unsigned int aom_highbd_avg_8x8/, "const uint8_t *, int p";
-    add_proto qw/unsigned int aom_highbd_avg_4x4/, "const uint8_t *, int p";
     add_proto qw/void aom_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
     specialize qw/aom_highbd_subtract_block sse2/;
   }
diff --git a/aom_dsp/arm/avg_neon.c b/aom_dsp/arm/avg_neon.c
index e730ccb..6ff7600 100644
--- a/aom_dsp/arm/avg_neon.c
+++ b/aom_dsp/arm/avg_neon.c
@@ -25,44 +25,6 @@
   return vget_lane_u32(c, 0);
 }
 
-unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) {
-  uint16x8_t v_sum;
-  uint32x2_t v_s0 = vdup_n_u32(0);
-  uint32x2_t v_s1 = vdup_n_u32(0);
-  v_s0 = vld1_lane_u32((const uint32_t *)s, v_s0, 0);
-  v_s0 = vld1_lane_u32((const uint32_t *)(s + p), v_s0, 1);
-  v_s1 = vld1_lane_u32((const uint32_t *)(s + 2 * p), v_s1, 0);
-  v_s1 = vld1_lane_u32((const uint32_t *)(s + 3 * p), v_s1, 1);
-  v_sum = vaddl_u8(vreinterpret_u8_u32(v_s0), vreinterpret_u8_u32(v_s1));
-  return (horizontal_add_u16x8(v_sum) + 8) >> 4;
-}
-
-unsigned int aom_avg_8x8_neon(const uint8_t *s, int p) {
-  uint8x8_t v_s0 = vld1_u8(s);
-  const uint8x8_t v_s1 = vld1_u8(s + p);
-  uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
-
-  v_s0 = vld1_u8(s + 2 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  v_s0 = vld1_u8(s + 3 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  v_s0 = vld1_u8(s + 4 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  v_s0 = vld1_u8(s + 5 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  v_s0 = vld1_u8(s + 6 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  v_s0 = vld1_u8(s + 7 * p);
-  v_sum = vaddw_u8(v_sum, v_s0);
-
-  return (horizontal_add_u16x8(v_sum) + 32) >> 6;
-}
-
 // coeff: 16 bits, dynamic range [-32640, 32640].
 // length: value range {16, 64, 256, 1024}.
 int aom_satd_neon(const int16_t *coeff, int length) {
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index eb60597..f732224 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -13,26 +13,6 @@
 #include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 
-unsigned int aom_avg_8x8_c(const uint8_t *src, int stride) {
-  int i, j;
-  int sum = 0;
-  for (i = 0; i < 8; ++i, src += stride)
-    for (j = 0; j < 8; sum += src[j], ++j) {
-    }
-
-  return ROUND_POWER_OF_TWO(sum, 6);
-}
-
-unsigned int aom_avg_4x4_c(const uint8_t *src, int stride) {
-  int i, j;
-  int sum = 0;
-  for (i = 0; i < 4; ++i, src += stride)
-    for (j = 0; j < 4; sum += src[j], ++j) {
-    }
-
-  return ROUND_POWER_OF_TWO(sum, 4);
-}
-
 // src_diff: first pass, 9 bit, dynamic range [-255, 255]
 //           second pass, 12 bit, dynamic range [-2040, 2040]
 static void hadamard_col8(const int16_t *src_diff, int src_stride,
@@ -192,28 +172,6 @@
 }
 
 #if CONFIG_HIGHBITDEPTH
-unsigned int aom_highbd_avg_8x8_c(const uint8_t *src, int stride) {
-  int i, j;
-  int sum = 0;
-  const uint16_t *s = CONVERT_TO_SHORTPTR(src);
-  for (i = 0; i < 8; ++i, s += stride)
-    for (j = 0; j < 8; sum += s[j], ++j) {
-    }
-
-  return ROUND_POWER_OF_TWO(sum, 6);
-}
-
-unsigned int aom_highbd_avg_4x4_c(const uint8_t *src, int stride) {
-  int i, j;
-  int sum = 0;
-  const uint16_t *s = CONVERT_TO_SHORTPTR(src);
-  for (i = 0; i < 4; ++i, s += stride)
-    for (j = 0; j < 4; sum += s[j], ++j) {
-    }
-
-  return ROUND_POWER_OF_TWO(sum, 4);
-}
-
 void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                              int dp, int *min, int *max) {
   int i, j;
diff --git a/aom_dsp/mips/avg_msa.c b/aom_dsp/mips/avg_msa.c
deleted file mode 100644
index 0e17281..0000000
--- a/aom_dsp/mips/avg_msa.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "./aom_dsp_rtcd.h"
-#include "aom_dsp/mips/macros_msa.h"
-
-uint32_t aom_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
-  uint32_t sum_out;
-  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
-  v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
-  v4u32 sum = { 0 };
-
-  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
-  HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
-  HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
-  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
-  ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
-  sum0 += sum4;
-
-  sum = __msa_hadd_u_w(sum0, sum0);
-  sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
-  sum = __msa_hadd_u_w(sum0, sum0);
-  sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
-  sum_out = __msa_copy_u_w((v4i32)sum, 0);
-
-  return sum_out;
-}
-
-uint32_t aom_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
-  uint32_t sum_out;
-  uint32_t src0, src1, src2, src3;
-  v16u8 vec = { 0 };
-  v8u16 sum0;
-  v4u32 sum1;
-  v2u64 sum2;
-
-  LW4(src, src_stride, src0, src1, src2, src3);
-  INSERT_W4_UB(src0, src1, src2, src3, vec);
-
-  sum0 = __msa_hadd_u_h(vec, vec);
-  sum1 = __msa_hadd_u_w(sum0, sum0);
-  sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1);
-  sum1 = __msa_hadd_u_w(sum0, sum0);
-  sum2 = __msa_hadd_u_d(sum1, sum1);
-  sum1 = (v4u32)__msa_srari_w((v4i32)sum2, 4);
-  sum_out = __msa_copy_u_w((v4i32)sum1, 0);
-
-  return sum_out;
-}
diff --git a/aom_dsp/x86/avg_intrin_sse2.c b/aom_dsp/x86/avg_intrin_sse2.c
index bcdc20f..1a64574 100644
--- a/aom_dsp/x86/avg_intrin_sse2.c
+++ b/aom_dsp/x86/avg_intrin_sse2.c
@@ -94,52 +94,6 @@
   *min = _mm_extract_epi16(minabsdiff, 0);
 }
 
-unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
-  __m128i s0, s1, u0;
-  unsigned int avg = 0;
-  u0 = _mm_setzero_si128();
-  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-
-  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 8));
-  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 32));
-  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
-  avg = _mm_extract_epi16(s0, 0);
-  return (avg + 32) >> 6;
-}
-
-unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
-  __m128i s0, s1, u0;
-  unsigned int avg = 0;
-
-  u0 = _mm_setzero_si128();
-  s0 = _mm_unpacklo_epi8(xx_loadl_32(s), u0);
-  s1 = _mm_unpacklo_epi8(xx_loadl_32(s + p), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 2 * p), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-  s1 = _mm_unpacklo_epi8(xx_loadl_32(s + 3 * p), u0);
-  s0 = _mm_adds_epu16(s0, s1);
-
-  s0 = _mm_adds_epu16(s0, _mm_srli_si128(s0, 4));
-  s0 = _mm_adds_epu16(s0, _mm_srli_epi64(s0, 16));
-  avg = _mm_extract_epi16(s0, 0);
-  return (avg + 8) >> 4;
-}
-
 static void hadamard_col8_sse2(__m128i *in, int iter) {
   __m128i a0 = in[0];
   __m128i a1 = in[1];
diff --git a/av1/av1.cmake b/av1/av1.cmake
index d2124e2..c206c57 100644
--- a/av1/av1.cmake
+++ b/av1/av1.cmake
@@ -145,9 +145,7 @@
     "${AOM_ROOT}/av1/encoder/tokenize.c"
     "${AOM_ROOT}/av1/encoder/tokenize.h"
     "${AOM_ROOT}/av1/encoder/treewriter.c"
-    "${AOM_ROOT}/av1/encoder/treewriter.h"
-    "${AOM_ROOT}/av1/encoder/variance_tree.c"
-    "${AOM_ROOT}/av1/encoder/variance_tree.h")
+    "${AOM_ROOT}/av1/encoder/treewriter.h")
 
 set(AOM_AV1_COMMON_INTRIN_SSE2
     "${AOM_ROOT}/av1/common/x86/idct_intrin_sse2.c")
diff --git a/av1/av1_cx.mk b/av1/av1_cx.mk
index 4e4734b..6af5c61 100644
--- a/av1/av1_cx.mk
+++ b/av1/av1_cx.mk
@@ -23,8 +23,6 @@
 AV1_CX_SRCS-yes += encoder/bitstream.c
 AV1_CX_SRCS-yes += encoder/context_tree.c
 AV1_CX_SRCS-yes += encoder/context_tree.h
-AV1_CX_SRCS-yes += encoder/variance_tree.c
-AV1_CX_SRCS-yes += encoder/variance_tree.h
 AV1_CX_SRCS-yes += encoder/cost.h
 AV1_CX_SRCS-yes += encoder/cost.c
 AV1_CX_SRCS-yes += encoder/dct.c
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 25ba529..a0888ab 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -449,563 +449,6 @@
 }
 #endif  // CONFIG_SUPERTX
 
-static void set_block_size(AV1_COMP *const cpi, MACROBLOCK *const x,
-                           MACROBLOCKD *const xd, int mi_row, int mi_col,
-                           BLOCK_SIZE bsize) {
-  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
-    const int mi_width = AOMMAX(mi_size_wide[bsize], mi_size_wide[BLOCK_8X8]);
-    const int mi_height = AOMMAX(mi_size_high[bsize], mi_size_high[BLOCK_8X8]);
-    for (int r = 0; r < mi_height; ++r) {
-      for (int c = 0; c < mi_width; ++c) {
-        set_mode_info_offsets(cpi, x, xd, mi_row + r, mi_col + c);
-        xd->mi[0]->mbmi.sb_type = bsize;
-      }
-    }
-  }
-}
-
-static void set_vt_partitioning(AV1_COMP *cpi, MACROBLOCK *const x,
-                                MACROBLOCKD *const xd, VAR_TREE *vt, int mi_row,
-                                int mi_col, const int64_t *const threshold,
-                                const BLOCK_SIZE *const bsize_min) {
-  AV1_COMMON *const cm = &cpi->common;
-  const int hbw = mi_size_wide[vt->bsize] / 2;
-  const int hbh = mi_size_high[vt->bsize] / 2;
-  const int has_cols = mi_col + hbw < cm->mi_cols;
-  const int has_rows = mi_row + hbh < cm->mi_rows;
-
-  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
-
-  assert(vt->bsize >= BLOCK_8X8);
-
-  assert(hbh == hbw);
-
-  if (vt->bsize == BLOCK_8X8 && cm->frame_type != KEY_FRAME) {
-    set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_8X8);
-    return;
-  }
-
-  if (vt->force_split || (!has_cols && !has_rows)) goto split;
-
-  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
-  // variance is below threshold, otherwise split will be selected.
-  // No check for vert/horiz split as too few samples for variance.
-  if (vt->bsize == bsize_min[0]) {
-    if (has_cols && has_rows && vt->variances.none.variance < threshold[0]) {
-      set_block_size(cpi, x, xd, mi_row, mi_col, vt->bsize);
-      return;
-    } else {
-      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_SPLIT);
-      set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
-      if (vt->bsize > BLOCK_8X8) {
-        set_block_size(cpi, x, xd, mi_row, mi_col + hbw, subsize);
-        set_block_size(cpi, x, xd, mi_row + hbh, mi_col, subsize);
-        set_block_size(cpi, x, xd, mi_row + hbh, mi_col + hbw, subsize);
-      }
-      return;
-    }
-  } else if (vt->bsize > bsize_min[0]) {
-    // For key frame: take split for bsize above 32X32 or very high variance.
-    if (cm->frame_type == KEY_FRAME &&
-        (vt->bsize > BLOCK_32X32 ||
-         vt->variances.none.variance > (threshold[0] << 4))) {
-      goto split;
-    }
-    // If variance is low, take the bsize (no split).
-    if (has_cols && has_rows && vt->variances.none.variance < threshold[0]) {
-      set_block_size(cpi, x, xd, mi_row, mi_col, vt->bsize);
-      return;
-    }
-
-    // Check vertical split.
-    if (has_rows) {
-      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_VERT);
-      if (vt->variances.vert[0].variance < threshold[0] &&
-          vt->variances.vert[1].variance < threshold[0] &&
-          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
-        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
-        set_block_size(cpi, x, xd, mi_row, mi_col + hbw, subsize);
-        return;
-      }
-    }
-    // Check horizontal split.
-    if (has_cols) {
-      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_HORZ);
-      if (vt->variances.horz[0].variance < threshold[0] &&
-          vt->variances.horz[1].variance < threshold[0] &&
-          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
-        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
-        set_block_size(cpi, x, xd, mi_row + hbh, mi_col, subsize);
-        return;
-      }
-    }
-  }
-
-split : {
-  set_vt_partitioning(cpi, x, xd, vt->split[0], mi_row, mi_col, threshold + 1,
-                      bsize_min + 1);
-  set_vt_partitioning(cpi, x, xd, vt->split[1], mi_row, mi_col + hbw,
-                      threshold + 1, bsize_min + 1);
-  set_vt_partitioning(cpi, x, xd, vt->split[2], mi_row + hbh, mi_col,
-                      threshold + 1, bsize_min + 1);
-  set_vt_partitioning(cpi, x, xd, vt->split[3], mi_row + hbh, mi_col + hbw,
-                      threshold + 1, bsize_min + 1);
-  return;
-}
-}
-
-// Set the variance split thresholds for following the block sizes:
-// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
-// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
-// currently only used on key frame.
-static void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[], int q) {
-  AV1_COMMON *const cm = &cpi->common;
-  const int is_key_frame = (cm->frame_type == KEY_FRAME);
-  const int threshold_multiplier = is_key_frame ? 20 : 1;
-  const int64_t threshold_base =
-      (int64_t)(threshold_multiplier * cpi->dequants.y_dequant[q][1]);
-  if (is_key_frame) {
-    thresholds[1] = threshold_base;
-    thresholds[2] = threshold_base >> 2;
-    thresholds[3] = threshold_base >> 2;
-    thresholds[4] = threshold_base << 2;
-  } else {
-    thresholds[2] = threshold_base;
-    if (cm->width <= 352 && cm->height <= 288) {
-      thresholds[1] = threshold_base >> 2;
-      thresholds[3] = threshold_base << 3;
-    } else {
-      thresholds[1] = threshold_base;
-      thresholds[2] = (5 * threshold_base) >> 2;
-      if (cm->width >= 1920 && cm->height >= 1080)
-        thresholds[2] = (7 * threshold_base) >> 2;
-      thresholds[3] = threshold_base << cpi->oxcf.speed;
-    }
-  }
-  thresholds[0] = INT64_MIN;
-}
-
-void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q) {
-  AV1_COMMON *const cm = &cpi->common;
-  SPEED_FEATURES *const sf = &cpi->sf;
-  const int is_key_frame = (cm->frame_type == KEY_FRAME);
-  if (sf->partition_search_type != VAR_BASED_PARTITION &&
-      sf->partition_search_type != REFERENCE_PARTITION) {
-    return;
-  } else {
-    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
-    // The thresholds below are not changed locally.
-    if (is_key_frame) {
-      cpi->vbp_threshold_sad = 0;
-      cpi->vbp_bsize_min = BLOCK_8X8;
-    } else {
-      if (cm->width <= 352 && cm->height <= 288)
-        cpi->vbp_threshold_sad = 100;
-      else
-        cpi->vbp_threshold_sad = (cpi->dequants.y_dequant[q][1] << 1) > 1000
-                                     ? (cpi->dequants.y_dequant[q][1] << 1)
-                                     : 1000;
-      cpi->vbp_bsize_min = BLOCK_16X16;
-    }
-    cpi->vbp_threshold_minmax = 15 + (q >> 3);
-  }
-}
-
-// Compute the minmax over the 8x8 subblocks.
-static int compute_minmax_8x8(const uint8_t *src, int src_stride,
-                              const uint8_t *ref, int ref_stride,
-#if CONFIG_HIGHBITDEPTH
-                              int highbd,
-#endif
-                              int pixels_wide, int pixels_high) {
-  int k;
-  int minmax_max = 0;
-  int minmax_min = 255;
-  // Loop over the 4 8x8 subblocks.
-  for (k = 0; k < 4; k++) {
-    const int x8_idx = ((k & 1) << 3);
-    const int y8_idx = ((k >> 1) << 3);
-    int min = 0;
-    int max = 0;
-    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
-      const int src_offset = y8_idx * src_stride + x8_idx;
-      const int ref_offset = y8_idx * ref_stride + x8_idx;
-#if CONFIG_HIGHBITDEPTH
-      if (highbd) {
-        aom_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
-                              ref_stride, &min, &max);
-      } else {
-        aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
-                       ref_stride, &min, &max);
-      }
-#else
-      aom_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
-                     &min, &max);
-#endif
-      if ((max - min) > minmax_max) minmax_max = (max - min);
-      if ((max - min) < minmax_min) minmax_min = (max - min);
-    }
-  }
-  return (minmax_max - minmax_min);
-}
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE int avg_4x4(const uint8_t *const src, const int stride,
-                          const int highbd) {
-  if (highbd) {
-    return aom_highbd_avg_4x4(src, stride);
-  } else {
-    return aom_avg_4x4(src, stride);
-  }
-}
-#else
-static INLINE int avg_4x4(const uint8_t *const src, const int stride) {
-  return aom_avg_4x4(src, stride);
-}
-#endif
-
-#if CONFIG_HIGHBITDEPTH
-static INLINE int avg_8x8(const uint8_t *const src, const int stride,
-                          const int highbd) {
-  if (highbd) {
-    return aom_highbd_avg_8x8(src, stride);
-  } else {
-    return aom_avg_8x8(src, stride);
-  }
-}
-#else
-static INLINE int avg_8x8(const uint8_t *const src, const int stride) {
-  return aom_avg_8x8(src, stride);
-}
-#endif
-
-static void init_variance_tree(VAR_TREE *const vt,
-#if CONFIG_HIGHBITDEPTH
-                               const int highbd,
-#endif
-                               BLOCK_SIZE bsize, BLOCK_SIZE leaf_size,
-                               const int width, const int height,
-                               const uint8_t *const src, const int src_stride,
-                               const uint8_t *const ref, const int ref_stride) {
-  assert(bsize >= leaf_size);
-
-  vt->bsize = bsize;
-
-  vt->force_split = 0;
-
-  vt->src = src;
-  vt->src_stride = src_stride;
-  vt->ref = ref;
-  vt->ref_stride = ref_stride;
-
-  vt->width = width;
-  vt->height = height;
-
-#if CONFIG_HIGHBITDEPTH
-  vt->highbd = highbd;
-#endif  // CONFIG_HIGHBITDEPTH
-
-  if (bsize > leaf_size) {
-    const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
-    const int px = block_size_wide[subsize];
-
-    init_variance_tree(vt->split[0],
-#if CONFIG_HIGHBITDEPTH
-                       highbd,
-#endif  // CONFIG_HIGHBITDEPTH
-                       subsize, leaf_size, AOMMIN(px, width),
-                       AOMMIN(px, height), src, src_stride, ref, ref_stride);
-    init_variance_tree(vt->split[1],
-#if CONFIG_HIGHBITDEPTH
-                       highbd,
-#endif  // CONFIG_HIGHBITDEPTH
-                       subsize, leaf_size, width - px, AOMMIN(px, height),
-                       src + px, src_stride, ref + px, ref_stride);
-    init_variance_tree(vt->split[2],
-#if CONFIG_HIGHBITDEPTH
-                       highbd,
-#endif  // CONFIG_HIGHBITDEPTH
-                       subsize, leaf_size, AOMMIN(px, width), height - px,
-                       src + px * src_stride, src_stride, ref + px * ref_stride,
-                       ref_stride);
-    init_variance_tree(vt->split[3],
-#if CONFIG_HIGHBITDEPTH
-                       highbd,
-#endif  // CONFIG_HIGHBITDEPTH
-                       subsize, leaf_size, width - px, height - px,
-                       src + px * src_stride + px, src_stride,
-                       ref + px * ref_stride + px, ref_stride);
-  }
-}
-
-// Fill the variance tree based on averaging pixel values (sub-sampling), at
-// the leaf node size.
-static void fill_variance_tree(VAR_TREE *const vt, const BLOCK_SIZE leaf_size) {
-  if (vt->bsize > leaf_size) {
-    fill_variance_tree(vt->split[0], leaf_size);
-    fill_variance_tree(vt->split[1], leaf_size);
-    fill_variance_tree(vt->split[2], leaf_size);
-    fill_variance_tree(vt->split[3], leaf_size);
-    fill_variance_node(vt);
-  } else if (vt->width <= 0 || vt->height <= 0) {
-    fill_variance(0, 0, 0, &vt->variances.none);
-  } else {
-    unsigned int sse = 0;
-    int sum = 0;
-    int src_avg;
-    int ref_avg;
-    assert(leaf_size == BLOCK_4X4 || leaf_size == BLOCK_8X8);
-    if (leaf_size == BLOCK_4X4) {
-      src_avg = avg_4x4(vt->src, vt->src_stride IF_HBD(, vt->highbd));
-      ref_avg = avg_4x4(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
-    } else {
-      src_avg = avg_8x8(vt->src, vt->src_stride IF_HBD(, vt->highbd));
-      ref_avg = avg_8x8(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
-    }
-    sum = src_avg - ref_avg;
-    sse = sum * sum;
-    fill_variance(sse, sum, 0, &vt->variances.none);
-  }
-}
-
-static void refine_variance_tree(VAR_TREE *const vt, const int64_t threshold) {
-  if (vt->bsize >= BLOCK_8X8) {
-    if (vt->bsize == BLOCK_16X16) {
-      if (vt->variances.none.variance <= threshold)
-        return;
-      else
-        vt->force_split = 0;
-    }
-
-    refine_variance_tree(vt->split[0], threshold);
-    refine_variance_tree(vt->split[1], threshold);
-    refine_variance_tree(vt->split[2], threshold);
-    refine_variance_tree(vt->split[3], threshold);
-
-    if (vt->bsize <= BLOCK_16X16) fill_variance_node(vt);
-  } else if (vt->width <= 0 || vt->height <= 0) {
-    fill_variance(0, 0, 0, &vt->variances.none);
-  } else {
-    const int src_avg = avg_4x4(vt->src, vt->src_stride IF_HBD(, vt->highbd));
-    const int ref_avg = avg_4x4(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
-    const int sum = src_avg - ref_avg;
-    const unsigned int sse = sum * sum;
-    assert(vt->bsize == BLOCK_4X4);
-    fill_variance(sse, sum, 0, &vt->variances.none);
-  }
-}
-
-static int check_split_key_frame(VAR_TREE *const vt, const int64_t threshold) {
-  if (vt->bsize == BLOCK_32X32) {
-    vt->force_split = vt->variances.none.variance > threshold;
-  } else {
-    vt->force_split |= check_split_key_frame(vt->split[0], threshold);
-    vt->force_split |= check_split_key_frame(vt->split[1], threshold);
-    vt->force_split |= check_split_key_frame(vt->split[2], threshold);
-    vt->force_split |= check_split_key_frame(vt->split[3], threshold);
-  }
-  return vt->force_split;
-}
-
-static int check_split(AV1_COMP *const cpi, VAR_TREE *const vt,
-                       const int segment_id, const int64_t *const thresholds) {
-  if (vt->bsize == BLOCK_16X16) {
-    vt->force_split = vt->variances.none.variance > thresholds[0];
-    if (!vt->force_split && vt->variances.none.variance > thresholds[-1] &&
-        !cyclic_refresh_segment_id_boosted(segment_id)) {
-      // We have some nominal amount of 16x16 variance (based on average),
-      // compute the minmax over the 8x8 sub-blocks, and if above threshold,
-      // force split to 8x8 block for this 16x16 block.
-      int minmax =
-          compute_minmax_8x8(vt->src, vt->src_stride, vt->ref, vt->ref_stride,
-#if CONFIG_HIGHBITDEPTH
-                             vt->highbd,
-#endif
-                             vt->width, vt->height);
-      vt->force_split = minmax > cpi->vbp_threshold_minmax;
-    }
-  } else {
-    vt->force_split |=
-        check_split(cpi, vt->split[0], segment_id, thresholds + 1);
-    vt->force_split |=
-        check_split(cpi, vt->split[1], segment_id, thresholds + 1);
-    vt->force_split |=
-        check_split(cpi, vt->split[2], segment_id, thresholds + 1);
-    vt->force_split |=
-        check_split(cpi, vt->split[3], segment_id, thresholds + 1);
-
-    if (vt->bsize == BLOCK_32X32 && !vt->force_split) {
-      vt->force_split = vt->variances.none.variance > thresholds[0];
-    }
-  }
-
-  return vt->force_split;
-}
-
-// This function chooses partitioning based on the variance between source and
-// reconstructed last (or golden), where variance is computed for down-sampled
-// inputs.
-static void choose_partitioning(AV1_COMP *const cpi, ThreadData *const td,
-                                const TileInfo *const tile, MACROBLOCK *const x,
-                                const int mi_row, const int mi_col) {
-  AV1_COMMON *const cm = &cpi->common;
-  MACROBLOCKD *const xd = &x->e_mbd;
-  VAR_TREE *const vt = td->var_root[cm->mib_size_log2 - MIN_MIB_SIZE_LOG2];
-#if CONFIG_DUAL_FILTER
-  int i;
-#endif
-  const uint8_t *src;
-  const uint8_t *ref;
-  int src_stride;
-  int ref_stride;
-  int pixels_wide = MI_SIZE * mi_size_wide[cm->sb_size];
-  int pixels_high = MI_SIZE * mi_size_high[cm->sb_size];
-  int64_t thresholds[5] = {
-    cpi->vbp_thresholds[0], cpi->vbp_thresholds[1], cpi->vbp_thresholds[2],
-    cpi->vbp_thresholds[3], cpi->vbp_thresholds[4],
-  };
-  BLOCK_SIZE bsize_min[5] = { BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
-                              cpi->vbp_bsize_min, BLOCK_8X8 };
-  const int start_level = cm->sb_size == BLOCK_64X64 ? 1 : 0;
-  const int64_t *const thre = thresholds + start_level;
-  const BLOCK_SIZE *const bmin = bsize_min + start_level;
-
-  const int is_key_frame = (cm->frame_type == KEY_FRAME);
-  const int low_res = (cm->width <= 352 && cm->height <= 288);
-
-  int segment_id = CR_SEGMENT_ID_BASE;
-
-  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
-    const uint8_t *const map =
-        cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
-    segment_id = get_segment_id(cm, map, cm->sb_size, mi_row, mi_col);
-
-    if (cyclic_refresh_segment_id_boosted(segment_id)) {
-      int q = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
-      set_vbp_thresholds(cpi, thresholds, q);
-    }
-  }
-
-  set_offsets(cpi, tile, x, mi_row, mi_col, cm->sb_size);
-
-  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
-  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
-
-  src = x->plane[0].src.buf;
-  src_stride = x->plane[0].src.stride;
-
-  if (!is_key_frame) {
-    MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
-    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
-    const YV12_BUFFER_CONFIG *yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
-    unsigned int y_sad, y_sad_g;
-
-    const int hbs = cm->mib_size / 2;
-    const int split_vert = mi_col + hbs >= cm->mi_cols;
-    const int split_horz = mi_row + hbs >= cm->mi_rows;
-    BLOCK_SIZE bsize;
-
-    if (split_vert && split_horz)
-      bsize = get_subsize(cm->sb_size, PARTITION_SPLIT);
-    else if (split_vert)
-      bsize = get_subsize(cm->sb_size, PARTITION_VERT);
-    else if (split_horz)
-      bsize = get_subsize(cm->sb_size, PARTITION_HORZ);
-    else
-      bsize = cm->sb_size;
-
-    assert(yv12 != NULL);
-
-    if (yv12_g && yv12_g != yv12) {
-      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
-      y_sad_g = cpi->fn_ptr[bsize].sdf(
-          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
-          xd->plane[0].pre[0].stride);
-    } else {
-      y_sad_g = UINT_MAX;
-    }
-
-    av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
-                         &cm->frame_refs[LAST_FRAME - 1].sf);
-    mbmi->ref_frame[0] = LAST_FRAME;
-    mbmi->ref_frame[1] = NONE_FRAME;
-    mbmi->sb_type = cm->sb_size;
-    mbmi->mv[0].as_int = 0;
-#if CONFIG_DUAL_FILTER
-    for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = BILINEAR;
-#else
-    mbmi->interp_filter = BILINEAR;
-#endif
-
-    y_sad = av1_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
-
-    if (y_sad_g < y_sad) {
-      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
-                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
-      mbmi->ref_frame[0] = GOLDEN_FRAME;
-      mbmi->mv[0].as_int = 0;
-      y_sad = y_sad_g;
-    } else {
-      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
-    }
-
-    av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, cm->sb_size);
-
-    ref = xd->plane[0].dst.buf;
-    ref_stride = xd->plane[0].dst.stride;
-
-    // If the y_sad is very small, take the largest partition and exit.
-    // Don't check on boosted segment for now, as largest is suppressed there.
-    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
-      if (!split_vert && !split_horz) {
-        set_block_size(cpi, x, xd, mi_row, mi_col, cm->sb_size);
-        return;
-      }
-    }
-  } else {
-    ref = AV1_VAR_OFFS;
-    ref_stride = 0;
-#if CONFIG_HIGHBITDEPTH
-    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      switch (xd->bd) {
-        case 10: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_10); break;
-        case 12: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_12); break;
-        case 8:
-        default: ref = CONVERT_TO_BYTEPTR(AV1_HIGH_VAR_OFFS_8); break;
-      }
-    }
-#endif  // CONFIG_HIGHBITDEPTH
-  }
-
-  init_variance_tree(
-      vt,
-#if CONFIG_HIGHBITDEPTH
-      xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH,
-#endif  // CONFIG_HIGHBITDEPTH
-      cm->sb_size, (is_key_frame || low_res) ? BLOCK_4X4 : BLOCK_8X8,
-      pixels_wide, pixels_high, src, src_stride, ref, ref_stride);
-
-  // Fill in the entire tree of variances and compute splits.
-  if (is_key_frame) {
-    fill_variance_tree(vt, BLOCK_4X4);
-    check_split_key_frame(vt, thre[1]);
-  } else {
-    fill_variance_tree(vt, BLOCK_8X8);
-    check_split(cpi, vt, segment_id, thre);
-    if (low_res) {
-      refine_variance_tree(vt, thre[1] << 1);
-    }
-  }
-
-  vt->force_split |= mi_col + cm->mib_size > cm->mi_cols ||
-                     mi_row + cm->mib_size > cm->mi_rows;
-
-  // Now go through the entire structure, splitting every block size until
-  // we get to one that's got a variance lower than our threshold.
-  set_vt_partitioning(cpi, x, xd, vt, mi_row, mi_col, thre, bmin);
-}
-
 #if CONFIG_DUAL_FILTER
 static void reset_intmv_filter_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
                                     MB_MODE_INFO *mbmi) {
@@ -4735,14 +4178,6 @@
                        &dummy_rate_nocoef,
 #endif  // CONFIG_SUPERTX
                        1, pc_root);
-    } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
-      choose_partitioning(cpi, td, tile_info, x, mi_row, mi_col);
-      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size,
-                       &dummy_rate, &dummy_dist,
-#if CONFIG_SUPERTX
-                       &dummy_rate_nocoef,
-#endif  // CONFIG_SUPERTX
-                       1, pc_root);
     } else {
       // If required set upper and lower partition size limits
       if (sf->auto_min_max_partition_size) {
@@ -5375,10 +4810,6 @@
   av1_zero(x->blk_skip_drl);
 #endif
 
-  if (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
-      cpi->td.var_root[0] == NULL)
-    av1_setup_var_tree(&cpi->common, &cpi->td);
-
   {
     struct aom_usec_timer emr_timer;
     aom_usec_timer_start(&emr_timer);
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
index 9095d5c..46a99e1 100644
--- a/av1/encoder/encodeframe.h
+++ b/av1/encoder/encodeframe.h
@@ -35,8 +35,6 @@
 void av1_encode_tile(struct AV1_COMP *cpi, struct ThreadData *td, int tile_row,
                      int tile_col);
 
-void av1_set_variance_partition_thresholds(struct AV1_COMP *cpi, int q);
-
 void av1_update_tx_type_count(const struct AV1Common *cm, MACROBLOCKD *xd,
 #if CONFIG_TXK_SEL
                               int block, int plane,
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index fa6a88e..15ad823 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -483,7 +483,6 @@
   cpi->tile_tok[0][0] = 0;
 
   av1_free_pc_tree(&cpi->td);
-  av1_free_var_tree(&cpi->td);
 
 #if CONFIG_PALETTE
   if (cpi->common.allow_screen_content_tools)
@@ -2588,7 +2587,6 @@
 #endif  // CONFIG_PALETTE
       aom_free(thread_data->td->counts);
       av1_free_pc_tree(thread_data->td);
-      av1_free_var_tree(thread_data->td);
       aom_free(thread_data->td);
     }
   }
@@ -3963,7 +3961,6 @@
     reset_use_upsampled_references(cpi);
 
   av1_set_quantizer(cm, q);
-  av1_set_variance_partition_thresholds(cpi, q);
   setup_frame(cpi);
   suppress_active_map(cpi);
 
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 2ef0871..9e1fd2e 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -37,7 +37,6 @@
 #include "av1/encoder/rd.h"
 #include "av1/encoder/speed_features.h"
 #include "av1/encoder/tokenize.h"
-#include "av1/encoder/variance_tree.h"
 #if CONFIG_XIPHRC
 #include "av1/encoder/ratectrl_xiph.h"
 #endif
@@ -327,9 +326,6 @@
   PC_TREE *pc_tree;
   PC_TREE *pc_root[MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2 + 1];
 
-  VAR_TREE *var_tree;
-  VAR_TREE *var_root[MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2 + 1];
-
 #if CONFIG_PALETTE
   PALETTE_BUFFER *palette_buffer;
 #endif  // CONFIG_PALETTE
@@ -620,17 +616,6 @@
   int superres_pending;
 #endif  // CONFIG_FRAME_SUPERRES
 
-  // VAR_BASED_PARTITION thresholds
-  // 0 - threshold_128x128;
-  // 1 - threshold_64x64;
-  // 2 - threshold_32x32;
-  // 3 - threshold_16x16;
-  // 4 - threshold_8x8;
-  int64_t vbp_thresholds[5];
-  int64_t vbp_threshold_minmax;
-  int64_t vbp_threshold_sad;
-  BLOCK_SIZE vbp_bsize_min;
-
   // VARIANCE_AQ segment map refresh
   int vaq_refresh;
 
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index 454fc19..df4981f 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -93,10 +93,6 @@
         thread_data->td->pc_tree = NULL;
         av1_setup_pc_tree(cm, thread_data->td);
 
-        // Set up variance tree if needed.
-        if (cpi->sf.partition_search_type == VAR_BASED_PARTITION)
-          av1_setup_var_tree(cm, thread_data->td);
-
         // Allocate frame counters in thread data.
         CHECK_MEM_ERROR(cm, thread_data->td->counts,
                         aom_calloc(1, sizeof(*thread_data->td->counts)));
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 65db046..939abd7 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -374,8 +374,7 @@
   if (cpi->oxcf.pass != 1) {
     av1_fill_token_costs(x->token_costs, cm->fc->coef_probs);
 
-    if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
-        cm->frame_type == KEY_FRAME) {
+    if (cm->frame_type == KEY_FRAME) {
 #if CONFIG_EXT_PARTITION_TYPES
       for (i = 0; i < PARTITION_PLOFFSET; ++i)
         av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index 97d09e0..5710d77 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -195,11 +195,7 @@
   // Always use a fixed size partition
   FIXED_PARTITION,
 
-  REFERENCE_PARTITION,
-
-  // Use an arbitrary partitioning scheme based on source variance within
-  // a 64X64 SB
-  VAR_BASED_PARTITION
+  REFERENCE_PARTITION
 } PARTITION_SEARCH_TYPE;
 
 typedef enum {
diff --git a/av1/encoder/variance_tree.c b/av1/encoder/variance_tree.c
deleted file mode 100644
index 9384cd7..0000000
--- a/av1/encoder/variance_tree.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#include "av1/encoder/variance_tree.h"
-#include "av1/encoder/encoder.h"
-
-void av1_setup_var_tree(struct AV1Common *cm, ThreadData *td) {
-  int i, j;
-#if CONFIG_EXT_PARTITION
-  const int leaf_nodes = 1024;
-  const int tree_nodes = 1024 + 256 + 64 + 16 + 4 + 1;
-#else
-  const int leaf_nodes = 256;
-  const int tree_nodes = 256 + 64 + 16 + 4 + 1;
-#endif  // CONFIG_EXT_PARTITION
-  int index = 0;
-  VAR_TREE *this_var;
-  int nodes;
-
-  aom_free(td->var_tree);
-  CHECK_MEM_ERROR(cm, td->var_tree,
-                  aom_calloc(tree_nodes, sizeof(*td->var_tree)));
-
-  this_var = &td->var_tree[0];
-
-  // Sets up all the leaf nodes in the tree.
-  for (index = 0; index < leaf_nodes; ++index) {
-    VAR_TREE *const leaf = &td->var_tree[index];
-    leaf->split[0] = NULL;
-  }
-
-  // Each node has 4 leaf nodes, fill in the child pointers
-  // from leafs to the root.
-  for (nodes = leaf_nodes >> 2; nodes > 0; nodes >>= 2) {
-    for (i = 0; i < nodes; ++i, ++index) {
-      VAR_TREE *const node = &td->var_tree[index];
-      for (j = 0; j < 4; j++) node->split[j] = this_var++;
-    }
-  }
-
-  // Set up the root node for the largest superblock size
-  i = MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2;
-  td->var_root[i] = &td->var_tree[tree_nodes - 1];
-  // Set up the root nodes for the rest of the possible superblock sizes
-  while (--i >= 0) {
-    td->var_root[i] = td->var_root[i + 1]->split[0];
-  }
-}
-
-void av1_free_var_tree(ThreadData *td) {
-  aom_free(td->var_tree);
-  td->var_tree = NULL;
-}
diff --git a/av1/encoder/variance_tree.h b/av1/encoder/variance_tree.h
deleted file mode 100644
index a9f2730..0000000
--- a/av1/encoder/variance_tree.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2016, Alliance for Open Media. All rights reserved
- *
- * This source code is subject to the terms of the BSD 2 Clause License and
- * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
- * was not distributed with this source code in the LICENSE file, you can
- * obtain it at www.aomedia.org/license/software. If the Alliance for Open
- * Media Patent License 1.0 was not distributed with this source code in the
- * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
- */
-
-#ifndef AV1_ENCODER_VARIANCE_TREE_H_
-#define AV1_ENCODER_VARIANCE_TREE_H_
-
-#include <assert.h>
-
-#include "./aom_config.h"
-
-#include "aom/aom_integer.h"
-
-#include "av1/common/enums.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct AV1Common;
-struct ThreadData;
-
-typedef struct {
-  int64_t sum_square_error;
-  int64_t sum_error;
-  int log2_count;
-  int variance;
-} VAR;
-
-typedef struct {
-  VAR none;
-  VAR horz[2];
-  VAR vert[2];
-} partition_variance;
-
-typedef struct VAR_TREE {
-  int force_split;
-  partition_variance variances;
-  struct VAR_TREE *split[4];
-  BLOCK_SIZE bsize;
-  const uint8_t *src;
-  const uint8_t *ref;
-  int src_stride;
-  int ref_stride;
-  int width;
-  int height;
-#if CONFIG_HIGHBITDEPTH
-  int highbd;
-#endif  // CONFIG_HIGHBITDEPTH
-} VAR_TREE;
-
-void av1_setup_var_tree(struct AV1Common *cm, struct ThreadData *td);
-void av1_free_var_tree(struct ThreadData *td);
-
-// Set variance values given sum square error, sum error, count.
-static INLINE void fill_variance(int64_t s2, int64_t s, int c, VAR *v) {
-  v->sum_square_error = s2;
-  v->sum_error = s;
-  v->log2_count = c;
-  v->variance =
-      (int)(256 * (v->sum_square_error -
-                   ((v->sum_error * v->sum_error) >> v->log2_count)) >>
-            v->log2_count);
-}
-
-static INLINE void sum_2_variances(const VAR *a, const VAR *b, VAR *r) {
-  assert(a->log2_count == b->log2_count);
-  fill_variance(a->sum_square_error + b->sum_square_error,
-                a->sum_error + b->sum_error, a->log2_count + 1, r);
-}
-
-static INLINE void fill_variance_node(VAR_TREE *vt) {
-  sum_2_variances(&vt->split[0]->variances.none, &vt->split[1]->variances.none,
-                  &vt->variances.horz[0]);
-  sum_2_variances(&vt->split[2]->variances.none, &vt->split[3]->variances.none,
-                  &vt->variances.horz[1]);
-  sum_2_variances(&vt->split[0]->variances.none, &vt->split[2]->variances.none,
-                  &vt->variances.vert[0]);
-  sum_2_variances(&vt->split[1]->variances.none, &vt->split[3]->variances.none,
-                  &vt->variances.vert[1]);
-  sum_2_variances(&vt->variances.vert[0], &vt->variances.vert[1],
-                  &vt->variances.none);
-}
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif /* AV1_ENCODER_VARIANCE_TREE_H_ */
diff --git a/test/avg_test.cc b/test/avg_test.cc
index b040f6a..e83a75c 100644
--- a/test/avg_test.cc
+++ b/test/avg_test.cc
@@ -53,21 +53,6 @@
     rnd_.Reset(ACMRandom::DeterministicSeed());
   }
 
-  // Sum Pixels
-  static unsigned int ReferenceAverage8x8(const uint8_t *source, int pitch) {
-    unsigned int average = 0;
-    for (int h = 0; h < 8; ++h)
-      for (int w = 0; w < 8; ++w) average += source[h * pitch + w];
-    return ((average + 32) >> 6);
-  }
-
-  static unsigned int ReferenceAverage4x4(const uint8_t *source, int pitch) {
-    unsigned int average = 0;
-    for (int h = 0; h < 4; ++h)
-      for (int w = 0; w < 4; ++w) average += source[h * pitch + w];
-    return ((average + 8) >> 4);
-  }
-
   void FillConstant(uint8_t fill_constant) {
     for (int i = 0; i < width_ * height_; ++i) {
       source_data_[i] = fill_constant;
@@ -86,35 +71,6 @@
 
   ACMRandom rnd_;
 };
-typedef unsigned int (*AverageFunction)(const uint8_t *s, int pitch);
-
-typedef std::tr1::tuple<int, int, int, int, AverageFunction> AvgFunc;
-
-class AverageTest : public AverageTestBase,
-                    public ::testing::WithParamInterface<AvgFunc> {
- public:
-  AverageTest() : AverageTestBase(GET_PARAM(0), GET_PARAM(1)) {}
-
- protected:
-  void CheckAverages() {
-    const int block_size = GET_PARAM(3);
-    unsigned int expected = 0;
-    if (block_size == 8) {
-      expected =
-          ReferenceAverage8x8(source_data_ + GET_PARAM(2), source_stride_);
-    } else if (block_size == 4) {
-      expected =
-          ReferenceAverage4x4(source_data_ + GET_PARAM(2), source_stride_);
-    }
-
-    ASM_REGISTER_STATE_CHECK(
-        GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_));
-    unsigned int actual =
-        GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_);
-
-    EXPECT_EQ(expected, actual);
-  }
-};
 
 typedef void (*IntProRowFunc)(int16_t hbuf[16], uint8_t const *ref,
                               const int ref_stride, const int height);
@@ -229,25 +185,6 @@
 
 uint8_t *AverageTestBase::source_data_ = NULL;
 
-TEST_P(AverageTest, MinValue) {
-  FillConstant(0);
-  CheckAverages();
-}
-
-TEST_P(AverageTest, MaxValue) {
-  FillConstant(255);
-  CheckAverages();
-}
-
-TEST_P(AverageTest, Random) {
-  // The reference frame, but not the source frame, may be unaligned for
-  // certain types of searches.
-  for (int i = 0; i < 1000; i++) {
-    FillRandom();
-    CheckAverages();
-  }
-}
-
 TEST_P(IntProRowTest, MinValue) {
   FillConstant(0);
   RunComparison();
@@ -309,11 +246,6 @@
 
 using std::tr1::make_tuple;
 
-INSTANTIATE_TEST_CASE_P(
-    C, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 1, 8, &aom_avg_8x8_c),
-                      make_tuple(16, 16, 1, 4, &aom_avg_4x4_c)));
-
 INSTANTIATE_TEST_CASE_P(C, SatdTest,
                         ::testing::Values(make_tuple(16, &aom_satd_c),
                                           make_tuple(64, &aom_satd_c),
@@ -322,15 +254,6 @@
 
 #if HAVE_SSE2
 INSTANTIATE_TEST_CASE_P(
-    SSE2, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_sse2),
-                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_sse2),
-                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_sse2),
-                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_sse2),
-                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_sse2),
-                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_sse2)));
-
-INSTANTIATE_TEST_CASE_P(
     SSE2, IntProRowTest,
     ::testing::Values(make_tuple(16, &aom_int_pro_row_sse2, &aom_int_pro_row_c),
                       make_tuple(32, &aom_int_pro_row_sse2, &aom_int_pro_row_c),
@@ -353,15 +276,6 @@
 
 #if HAVE_NEON
 INSTANTIATE_TEST_CASE_P(
-    NEON, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_neon),
-                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_neon),
-                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_neon),
-                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_neon),
-                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_neon),
-                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_neon)));
-
-INSTANTIATE_TEST_CASE_P(
     NEON, IntProRowTest,
     ::testing::Values(make_tuple(16, &aom_int_pro_row_neon, &aom_int_pro_row_c),
                       make_tuple(32, &aom_int_pro_row_neon, &aom_int_pro_row_c),
@@ -382,15 +296,4 @@
                                           make_tuple(1024, &aom_satd_neon)));
 #endif
 
-#if HAVE_MSA
-INSTANTIATE_TEST_CASE_P(
-    MSA, AverageTest,
-    ::testing::Values(make_tuple(16, 16, 0, 8, &aom_avg_8x8_msa),
-                      make_tuple(16, 16, 5, 8, &aom_avg_8x8_msa),
-                      make_tuple(32, 32, 15, 8, &aom_avg_8x8_msa),
-                      make_tuple(16, 16, 0, 4, &aom_avg_4x4_msa),
-                      make_tuple(16, 16, 5, 4, &aom_avg_4x4_msa),
-                      make_tuple(32, 32, 15, 4, &aom_avg_4x4_msa)));
-#endif
-
 }  // namespace