Port folder renaming changes from AOM

Manually cherry-picked commits:
ceef058 libvpx->libaom part2
3d26d91 libvpx -> libaom
cfea7dd vp10/ -> av1/
3a8eff7 Fix a build issue for a test
bf4202e Rename vpx to aom

Change-Id: I1b0eb5a40796e3aaf41c58984b4229a439a597dc
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
new file mode 100644
index 0000000..173556e
--- /dev/null
+++ b/av1/encoder/aq_complexity.c
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+
+#include "av1/encoder/aq_complexity.h"
+#include "av1/encoder/aq_variance.h"
+#include "av1/encoder/encodeframe.h"
+#include "av1/common/seg_common.h"
+#include "av1/encoder/segmentation.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_ports/system_state.h"
+
+#define AQ_C_SEGMENTS 5
+#define DEFAULT_AQ2_SEG 3  // Neutral Q segment
+#define AQ_C_STRENGTHS 3
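+// Tables indexed by [aq strength][segment]: Q adjustment factors, projected
+// rate transition thresholds and log-variance thresholds.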
+static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { 1.75, 1.25, 1.05, 1.00, 0.90 },
+  { 2.00, 1.50, 1.15, 1.00, 0.85 },
+  { 2.50, 1.75, 1.25, 1.00, 0.80 }
+};
+static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { 0.15, 0.30, 0.55, 2.00, 100.0 },
+  { 0.20, 0.40, 0.65, 2.00, 100.0 },
+  { 0.25, 0.50, 0.75, 2.00, 100.0 }
+};
+static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+  { -4.0, -3.0, -2.0, 100.00, 100.0 },
+  { -3.5, -2.5, -1.5, 100.00, 100.0 },
+  { -3.0, -2.0, -1.0, 100.00, 100.0 }
+};
+
+#define DEFAULT_COMPLEXITY 64
+
+static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
+  // Approximate base quantizer (truncated to int)
+  const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
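+  // Strength 0 for base_quant <= 10, 1 for 11..25 and 2 above 25.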
+  return (base_quant > 10) + (base_quant > 25);
+}
+
+void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  struct segmentation *const seg = &cm->seg;
+
+  // Make SURE use of floating point in this function is safe.
+  vpx_clear_system_state();
+
+  if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
+      cpi->refresh_alt_ref_frame ||
+      (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
+    int segment;
+    const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
+
+    // Clear down the segment map.
+    memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
+
+    vp10_clearall_segfeatures(seg);
+
+    // Segmentation only makes sense if the target bits per SB is above a
+    // threshold. Below this the overheads will usually outweigh any benefit.
+    if (cpi->rc.sb64_target_rate < 256) {
+      vp10_disable_segmentation(seg);
+      return;
+    }
+
+    vp10_enable_segmentation(seg);
+
+    // Select delta coding method.
+    seg->abs_delta = SEGMENT_DELTADATA;
+
+    // Default segment "Q" feature is disabled so it defaults to the baseline Q.
+    vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
+
+    // Use some of the segments for in frame Q adjustment.
+    for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
+      int qindex_delta;
+
+      if (segment == DEFAULT_AQ2_SEG) continue;
+
+      qindex_delta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, cm->base_qindex,
+          aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
+
+      // For AQ complexity mode, we don't allow Q0 in a segment if the base
+      // Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
+      // Q delta is sometimes applied without going back around the rd loop.
+      // This could lead to an illegal combination of partition size and q.
+      if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) {
+        qindex_delta = -cm->base_qindex + 1;
+      }
+      if ((cm->base_qindex + qindex_delta) > 0) {
+        vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+        vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+      }
+    }
+  }
+}
+
+#define DEFAULT_LV_THRESH 10.0
+#define MIN_DEFAULT_LV_THRESH 8.0
+#define VAR_STRENGTH_STEP 0.25
+// Select a segment for the current block.
+// The choice of segment for a block depends on the ratio of the projected
+// bits for the block to a target average, and on its spatial complexity.
+void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+                             int mi_row, int mi_col, int projected_rate) {
+  VP10_COMMON *const cm = &cpi->common;
+
+  const int mi_offset = mi_row * cm->mi_cols + mi_col;
+  const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
+  const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]);
+  int x, y;
+  int i;
+  unsigned char segment;
+
+  if (0) {
+    segment = DEFAULT_AQ2_SEG;
+  } else {
+    // Rate depends on fraction of a SB64 in frame ((xmis * ymis) / (bw * bh)).
+    // It is converted to bits * 256 units.
+    const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
+                            (cm->mib_size * cm->mib_size);
+    double logvar;
+    double low_var_thresh;
+    const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
+
+    vpx_clear_system_state();
+    low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+                                                    MIN_DEFAULT_LV_THRESH)
+                                           : DEFAULT_LV_THRESH;
+
+    vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
+    logvar = vp10_log_block_var(cpi, mb, bs);
+
+    segment = AQ_C_SEGMENTS - 1;  // Just in case no break out below.
+    for (i = 0; i < AQ_C_SEGMENTS; ++i) {
+      // Test rate against a threshold value and variance against a threshold.
+      // Increasing segment number (higher variance and complexity) = higher Q.
+      if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) &&
+          (logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) {
+        segment = i;
+        break;
+      }
+    }
+  }
+
+  // Fill in the entries in the segment map corresponding to this SB64.
+  for (y = 0; y < ymis; y++) {
+    for (x = 0; x < xmis; x++) {
+      cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment;
+    }
+  }
+}
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
new file mode 100644
index 0000000..db85406
--- /dev/null
+++ b/av1/encoder/aq_complexity.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
+#define VP10_ENCODER_AQ_COMPLEXITY_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "av1/common/enums.h"
+
+struct VP10_COMP;
+struct macroblock;
+
+// Select a segment for the current Block.
+void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
+                             BLOCK_SIZE bs, int mi_row, int mi_col,
+                             int projected_rate);
+
+// This function sets up a set of segments with delta Q values around
+// the baseline frame quantizer.
+void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_AQ_COMPLEXITY_H_
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
new file mode 100644
index 0000000..b7897f9
--- /dev/null
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -0,0 +1,566 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+
+#include "av1/common/seg_common.h"
+#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/segmentation.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_ports/system_state.h"
+
+struct CYCLIC_REFRESH {
+  // Percentage of blocks per frame that are targeted as candidates
+  // for cyclic refresh.
+  int percent_refresh;
+  // Maximum q-delta as percentage of base q.
+  int max_qdelta_perc;
+  // Superblock starting index for cycling through the frame.
+  int sb_index;
+  // Controls how long a block will need to wait to be refreshed again, in
+  // excess of the cycle time, i.e., in the case of all zero motion, block
+  // will be refreshed every (100/percent_refresh + time_for_refresh) frames.
+  int time_for_refresh;
+  // Target number of (8x8) blocks that are set for delta-q.
+  int target_num_seg_blocks;
+  // Actual number of (8x8) blocks that were applied delta-q.
+  int actual_num_seg1_blocks;
+  int actual_num_seg2_blocks;
+  // RD mult. parameters for segment 1.
+  int rdmult;
+  // Cyclic refresh map.
+  signed char *map;
+  // Map of the last q a block was coded at.
+  uint8_t *last_coded_q_map;
+  // Thresholds applied to the projected rate/distortion of the coding block,
+  // when deciding whether a block should be refreshed.
+  int64_t thresh_rate_sb;
+  int64_t thresh_dist_sb;
+  // Threshold applied to the motion vector (in units of 1/8 pel) of the
+  // coding block, when deciding whether the block should be refreshed.
+  int16_t motion_thresh;
+  // Rate target ratio to set q delta.
+  double rate_ratio_qdelta;
+  // Boost factor for rate target ratio, for segment CR_SEGMENT_ID_BOOST2.
+  int rate_boost_fac;
+  double low_content_avg;
+  int qindex_delta[3];
+};
+
+CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
+  size_t last_coded_q_map_size;
+  CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
+  if (cr == NULL) return NULL;
+
+  cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
+  if (cr->map == NULL) {
+    vp10_cyclic_refresh_free(cr);
+    return NULL;
+  }
+  last_coded_q_map_size = mi_rows * mi_cols * sizeof(*cr->last_coded_q_map);
+  cr->last_coded_q_map = vpx_malloc(last_coded_q_map_size);
+  if (cr->last_coded_q_map == NULL) {
+    vp10_cyclic_refresh_free(cr);
+    return NULL;
+  }
+  assert(MAXQ <= 255);
+  memset(cr->last_coded_q_map, MAXQ, last_coded_q_map_size);
+
+  return cr;
+}
+
+void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
+  vpx_free(cr->map);
+  vpx_free(cr->last_coded_q_map);
+  vpx_free(cr);
+}
+
+// Check if we should turn off cyclic refresh based on bitrate condition.
+static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
+                                        const RATE_CONTROL *rc) {
+  // Turn off cyclic refresh if bits available per frame is not sufficiently
+  // larger than bit cost of segmentation. Segment map bit cost should scale
+  // with number of seg blocks, so compare available bits to number of blocks.
+  // Average bits available per frame = avg_frame_bandwidth
+  // Number of (8x8) blocks in frame = mi_rows * mi_cols;
+  const float factor = 0.25;
+  const int number_blocks = cm->mi_rows * cm->mi_cols;
+  // The condition below corresponds to turning off at target bitrates:
+  // (at 30fps), ~12kbps for CIF, 36kbps for VGA, 100kbps for HD/720p.
+  // Also turn off at very small frame sizes, to avoid too large a fraction
+  // of superblocks being refreshed per frame. The threshold below is less
+  // than QCIF.
+  if (rc->avg_frame_bandwidth < factor * number_blocks ||
+      number_blocks / 64 < 5)
+    return 0;
+  else
+    return 1;
+}
+
+// Check if this coding block, of size bsize, should be considered for refresh
+// (lower-qp coding). Decision can be based on various factors, such as
+// size of the coding block (i.e., blocks below the min size are rejected),
+// coding mode, and rate/distortion.
+static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
+                                const MB_MODE_INFO *mbmi, int64_t rate,
+                                int64_t dist, int bsize) {
+  MV mv = mbmi->mv[0].as_mv;
+  // Reject the block for lower-qp coding if projected distortion
+  // is above the threshold, and any of the following is true:
+  // 1) mode uses large mv
+  // 2) mode is an intra-mode
+  // Otherwise accept for refresh.
+  if (dist > cr->thresh_dist_sb &&
+      (mv.row > cr->motion_thresh || mv.row < -cr->motion_thresh ||
+       mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh ||
+       !is_inter_block(mbmi)))
+    return CR_SEGMENT_ID_BASE;
+  else if (bsize >= BLOCK_16X16 && rate < cr->thresh_rate_sb &&
+           is_inter_block(mbmi) && mbmi->mv[0].as_int == 0 &&
+           cr->rate_boost_fac > 10)
+    // More aggressive delta-q for bigger blocks with zero motion.
+    return CR_SEGMENT_ID_BOOST2;
+  else
+    return CR_SEGMENT_ID_BOOST1;
+}
+
+// Compute delta-q for the segment.
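+// A negative delta is capped at max_qdelta_perc percent of q.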
+static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
+  const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+                                           rate_factor, cpi->common.bit_depth);
+  if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
+    deltaq = -cr->max_qdelta_perc * q / 100;
+  }
+  return deltaq;
+}
+
+// For the just encoded frame, estimate the bits, incorporating the delta-q
+// from non-base segment. For now ignore effect of multiple segments
+// (with different delta-q). Note this function is called in the postencode
+// stage (from rc_update_rate_correction_factors()).
+int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
+                                           double correction_factor) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  int estimated_bits;
+  int mbs = cm->MBs;
+  int num8x8bl = mbs << 2;
+  // Weight for non-base segments: use actual number of blocks refreshed in
+  // previous/just encoded frame. Note number of blocks here is in 8x8 units.
+  double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl;
+  double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl;
+  // Take segment weighted average for estimated bits.
+  estimated_bits =
+      (int)((1.0 - weight_segment1 - weight_segment2) *
+                vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+                                        correction_factor, cm->bit_depth) +
+            weight_segment1 *
+                vp10_estimate_bits_at_q(cm->frame_type,
+                                        cm->base_qindex + cr->qindex_delta[1],
+                                        mbs, correction_factor, cm->bit_depth) +
+            weight_segment2 *
+                vp10_estimate_bits_at_q(cm->frame_type,
+                                        cm->base_qindex + cr->qindex_delta[2],
+                                        mbs, correction_factor, cm->bit_depth));
+  return estimated_bits;
+}
+
+// Prior to encoding the frame, estimate the bits per mb, for a given q = i and
+// a corresponding delta-q (for segment 1). This function is called in
+// rc_regulate_q() to set the base qp index.
+// Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
+// to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
+int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
+                                       double correction_factor) {
+  const VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  int bits_per_mb;
+  int num8x8bl = cm->MBs << 2;
+  // Weight for segment prior to encoding: take the average of the target
+  // number for the frame to be encoded and the actual number from the
+  // previous frame.
+  double weight_segment =
+      (double)((cr->target_num_seg_blocks + cr->actual_num_seg1_blocks +
+                cr->actual_num_seg2_blocks) >>
+               1) /
+      num8x8bl;
+  // Compute delta-q corresponding to qindex i.
+  int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
+  // Take segment weighted average for bits per mb.
+  bits_per_mb =
+      (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
+                                                         correction_factor,
+                                                         cm->bit_depth) +
+            weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
+                                                 correction_factor,
+                                                 cm->bit_depth));
+  return bits_per_mb;
+}
+
+// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
+// check if we should reset the segment_id, and update the cyclic_refresh map
+// and segmentation map.
+void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
+                                        MB_MODE_INFO *const mbmi, int mi_row,
+                                        int mi_col, BLOCK_SIZE bsize,
+                                        int64_t rate, int64_t dist, int skip) {
+  const VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  const int bw = num_8x8_blocks_wide_lookup[bsize];
+  const int bh = num_8x8_blocks_high_lookup[bsize];
+  const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
+  const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
+  const int block_index = mi_row * cm->mi_cols + mi_col;
+  const int refresh_this_block =
+      candidate_refresh_aq(cr, mbmi, rate, dist, bsize);
+  // Default is to not update the refresh map.
+  int new_map_value = cr->map[block_index];
+  int x = 0;
+  int y = 0;
+
+  // If this block is labeled for refresh, check if we should reset the
+  // segment_id.
+  if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) {
+    mbmi->segment_id = refresh_this_block;
+    // Reset segment_id if will be skipped.
+    if (skip) mbmi->segment_id = CR_SEGMENT_ID_BASE;
+  }
+
+  // Update the cyclic refresh map, to be used for setting segmentation map
+  // for the next frame. If the block will be refreshed this frame, mark it
+  // as clean. The magnitude of the negative value influences how long we
+  // wait before considering it for refresh again.
+  if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) {
+    new_map_value = -cr->time_for_refresh;
+  } else if (refresh_this_block) {
+    // Else if it is accepted as candidate for refresh, and has not already
+    // been refreshed (marked as 1) then mark it as a candidate for cleanup
+    // for future time (marked as 0), otherwise don't update it.
+    if (cr->map[block_index] == 1) new_map_value = 0;
+  } else {
+    // Leave it marked as block that is not candidate for refresh.
+    new_map_value = 1;
+  }
+
+  // Update entries in the cyclic refresh map with new_map_value, and
+  // copy mbmi->segment_id into global segmentation map.
+  for (y = 0; y < ymis; y++)
+    for (x = 0; x < xmis; x++) {
+      int map_offset = block_index + y * cm->mi_cols + x;
+      cr->map[map_offset] = new_map_value;
+      cpi->segmentation_map[map_offset] = mbmi->segment_id;
+      // Inter skip blocks were clearly not coded at the current qindex, so
+      // don't update the map for them. For cases where motion is non-zero or
+      // the reference frame isn't the previous frame, the previous value in
+      // the map for this spatial location is not entirely correct.
+      if ((!is_inter_block(mbmi) || !skip) &&
+          mbmi->segment_id <= CR_SEGMENT_ID_BOOST2) {
+        cr->last_coded_q_map[map_offset] = clamp(
+            cm->base_qindex + cr->qindex_delta[mbmi->segment_id], 0, MAXQ);
+      } else if (is_inter_block(mbmi) && skip &&
+                 mbmi->segment_id <= CR_SEGMENT_ID_BOOST2) {
+        cr->last_coded_q_map[map_offset] =
+            VPXMIN(clamp(cm->base_qindex + cr->qindex_delta[mbmi->segment_id],
+                         0, MAXQ),
+                   cr->last_coded_q_map[map_offset]);
+      }
+    }
+}
+
+// Update the actual number of blocks that were applied the segment delta q.
+void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  unsigned char *const seg_map = cpi->segmentation_map;
+  int mi_row, mi_col;
+  cr->actual_num_seg1_blocks = 0;
+  cr->actual_num_seg2_blocks = 0;
+  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++)
+    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
+      if (cyclic_refresh_segment_id(seg_map[mi_row * cm->mi_cols + mi_col]) ==
+          CR_SEGMENT_ID_BOOST1)
+        cr->actual_num_seg1_blocks++;
+      else if (cyclic_refresh_segment_id(
+                   seg_map[mi_row * cm->mi_cols + mi_col]) ==
+               CR_SEGMENT_ID_BOOST2)
+        cr->actual_num_seg2_blocks++;
+    }
+}
+
+// Set golden frame update interval, for 1 pass CBR mode.
+void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  // Set minimum gf_interval for GF update to a multiple (== 4) of refresh
+  // period. Depending on past encoding stats, GF flag may be reset and update
+  // may not occur until next baseline_gf_interval.
+  if (cr->percent_refresh > 0)
+    rc->baseline_gf_interval = 4 * (100 / cr->percent_refresh);
+  else
+    rc->baseline_gf_interval = 40;
+}
+
+// Update some encoding stats (from the just encoded frame). If this frame's
+// background has high motion, refresh the golden frame. Otherwise, if the
+// golden reference is to be updated, check if we should NOT update the
+// golden ref.
+void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  int mi_row, mi_col;
+  double fraction_low = 0.0;
+  int low_content_frame = 0;
+
+  MODE_INFO **mi;
+  RATE_CONTROL *const rc = &cpi->rc;
+  const int rows = cm->mi_rows, cols = cm->mi_cols;
+  int cnt1 = 0, cnt2 = 0;
+  int force_gf_refresh = 0;
+
+  for (mi_row = 0; mi_row < rows; mi_row++) {
+    mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
+
+    for (mi_col = 0; mi_col < cols; mi_col++) {
+      int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0
+                            ? mi[0]->mbmi.mv[0].as_mv.row
+                            : -1 * mi[0]->mbmi.mv[0].as_mv.row;
+      int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0
+                            ? mi[0]->mbmi.mv[0].as_mv.col
+                            : -1 * mi[0]->mbmi.mv[0].as_mv.col;
+
+      // Calculate the motion of the background.
+      if (abs_mvr <= 16 && abs_mvc <= 16) {
+        cnt1++;
+        if (abs_mvr == 0 && abs_mvc == 0) cnt2++;
+      }
+      mi++;
+
+      // Accumulate low_content_frame.
+      if (cr->map[mi_row * cols + mi_col] < 1) low_content_frame++;
+    }
+  }
+
+  // For video conference clips, if the background has high motion in the
+  // current frame because of camera movement, set this frame as the golden
+  // frame.
+  // Use 70% and 5% as the thresholds for golden frame refreshing.
+  // Also, force this frame as a golden update frame if this frame will change
+  // the resolution (resize_pending != 0).
+  if (cpi->resize_pending != 0 ||
+      (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
+    vp10_cyclic_refresh_set_golden_update(cpi);
+    rc->frames_till_gf_update_due = rc->baseline_gf_interval;
+
+    if (rc->frames_till_gf_update_due > rc->frames_to_key)
+      rc->frames_till_gf_update_due = rc->frames_to_key;
+    cpi->refresh_golden_frame = 1;
+    force_gf_refresh = 1;
+  }
+
+  fraction_low = (double)low_content_frame / (rows * cols);
+  // Update average.
+  cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4;
+  if (!force_gf_refresh && cpi->refresh_golden_frame == 1) {
+    // Don't update golden reference if the amount of low_content for the
+    // current encoded frame is small, or if the recursive average of the
+    // low_content over the update interval window falls below threshold.
+    if (fraction_low < 0.8 || cr->low_content_avg < 0.7)
+      cpi->refresh_golden_frame = 0;
+    // Reset for the next interval.
+    cr->low_content_avg = fraction_low;
+  }
+}
+
+// Update the segmentation map, and related quantities: cyclic refresh map,
+// refresh sb_index, and target number of blocks to be refreshed.
+// The map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or to
+// 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock.
+// Blocks labeled as BOOST1 may later get set to BOOST2 (during the
+// encoding of the superblock).
+static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  unsigned char *const seg_map = cpi->segmentation_map;
+  int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
+  int xmis, ymis, x, y;
+  memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols);
+  sb_cols = (cm->mi_cols + cm->mib_size - 1) / cm->mib_size;
+  sb_rows = (cm->mi_rows + cm->mib_size - 1) / cm->mib_size;
+  sbs_in_frame = sb_cols * sb_rows;
+  // Number of target blocks to get the q delta (segment 1).
+  block_count = cr->percent_refresh * cm->mi_rows * cm->mi_cols / 100;
+  // Set the segmentation map: cycle through the superblocks, starting at
+  // cr->sb_index, and stopping when either block_count blocks have been found
+  // to be refreshed, or we have passed through the whole frame.
+  assert(cr->sb_index < sbs_in_frame);
+  i = cr->sb_index;
+  cr->target_num_seg_blocks = 0;
+  do {
+    int sum_map = 0;
+    // Get the mi_row/mi_col corresponding to superblock index i.
+    int sb_row_index = (i / sb_cols);
+    int sb_col_index = i - sb_row_index * sb_cols;
+    int mi_row = sb_row_index * cm->mib_size;
+    int mi_col = sb_col_index * cm->mib_size;
+    int qindex_thresh =
+        cpi->oxcf.content == VPX_CONTENT_SCREEN
+            ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+            : 0;
+    assert(mi_row >= 0 && mi_row < cm->mi_rows);
+    assert(mi_col >= 0 && mi_col < cm->mi_cols);
+    bl_index = mi_row * cm->mi_cols + mi_col;
+    // Loop through all MI blocks in superblock and update map.
+    xmis = VPXMIN(cm->mi_cols - mi_col, cm->mib_size);
+    ymis = VPXMIN(cm->mi_rows - mi_row, cm->mib_size);
+    for (y = 0; y < ymis; y++) {
+      for (x = 0; x < xmis; x++) {
+        const int bl_index2 = bl_index + y * cm->mi_cols + x;
+        // If the block is a candidate for cleanup then mark it for possible
+        // boost/refresh (segment 1). The segment id may get reset to 0 later
+        // if the block is coded as anything other than ZEROMV.
+        if (cr->map[bl_index2] == 0) {
+          if (cr->last_coded_q_map[bl_index2] > qindex_thresh) sum_map++;
+        } else if (cr->map[bl_index2] < 0) {
+          cr->map[bl_index2]++;
+        }
+      }
+    }
+    // Enforce constant segment over superblock.
+    // If the segment covers at least half of the superblock, set it to 1.
+    if (sum_map >= xmis * ymis / 2) {
+      for (y = 0; y < ymis; y++)
+        for (x = 0; x < xmis; x++) {
+          seg_map[bl_index + y * cm->mi_cols + x] = CR_SEGMENT_ID_BOOST1;
+        }
+      cr->target_num_seg_blocks += xmis * ymis;
+    }
+    i++;
+    if (i == sbs_in_frame) {
+      i = 0;
+    }
+  } while (cr->target_num_seg_blocks < block_count && i != cr->sb_index);
+  cr->sb_index = i;
+}
+
+// Set cyclic refresh parameters.
+void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  cr->percent_refresh = 10;
+  cr->max_qdelta_perc = 50;
+  cr->time_for_refresh = 0;
+  // Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4)
+  // periods of the refresh cycle, after a key frame.
+  if (rc->frames_since_key < 4 * cr->percent_refresh)
+    cr->rate_ratio_qdelta = 3.0;
+  else
+    cr->rate_ratio_qdelta = 2.0;
+  // Adjust some parameters for low resolutions at low bitrates.
+  if (cm->width <= 352 && cm->height <= 288 && rc->avg_frame_bandwidth < 3400) {
+    cr->motion_thresh = 4;
+    cr->rate_boost_fac = 10;
+  } else {
+    cr->motion_thresh = 32;
+    cr->rate_boost_fac = 17;
+  }
+}
+
+// Setup cyclic background refresh: set delta q and segmentation map.
+void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  struct segmentation *const seg = &cm->seg;
+  const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
+  if (cm->current_video_frame == 0) cr->low_content_avg = 0.0;
+  // Don't apply refresh on key frame or enhancement layer frames.
+  if (!apply_cyclic_refresh || cm->frame_type == KEY_FRAME) {
+    // Set segmentation map to 0 and disable.
+    unsigned char *const seg_map = cpi->segmentation_map;
+    memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
+    vp10_disable_segmentation(&cm->seg);
+    if (cm->frame_type == KEY_FRAME) {
+      memset(cr->last_coded_q_map, MAXQ,
+             cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
+      cr->sb_index = 0;
+    }
+    return;
+  } else {
+    int qindex_delta = 0;
+    int qindex2;
+    const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
+    vpx_clear_system_state();
+    // Set rate threshold to some multiple (set to 2 for now) of the target
+    // rate (target is given by sb64_target_rate and scaled by 256).
+    cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
+    // Distortion threshold, quadratic in Q, scale factor to be adjusted.
+    // q will not exceed 457, so (q * q) fits within 32 bits; see:
+    // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[].
+    cr->thresh_dist_sb = ((int64_t)(q * q)) << 2;
+
+    // Set up segmentation.
+    // Clear down the segment map.
+    vp10_enable_segmentation(&cm->seg);
+    vp10_clearall_segfeatures(seg);
+    // Select delta coding method.
+    seg->abs_delta = SEGMENT_DELTADATA;
+
+    // Note: setting temporal_update has no effect, as the seg-map coding method
+    // (temporal or spatial) is determined in
+    // vp10_choose_segmap_coding_method(),
+    // based on the coding cost of each method. In error_resilient mode the
+    // last_frame_seg_map is set to 0, so if temporal coding is used, it is
+    // relative to an all-zero previous map.
+    // seg->temporal_update = 0;
+
+    // Segment BASE "Q" feature is disabled so it defaults to the baseline Q.
+    vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
+    // Use segment BOOST1 for in-frame Q adjustment.
+    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
+    // Use segment BOOST2 for more aggressive in-frame Q adjustment.
+    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
+
+    // Set the q delta for segment BOOST1.
+    qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta);
+    cr->qindex_delta[1] = qindex_delta;
+
+    // Compute rd-mult for segment BOOST1.
+    qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ);
+
+    cr->rdmult = vp10_compute_rd_mult(cpi, qindex2);
+
+    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
+
+    // Set a more aggressive (higher) q delta for segment BOOST2.
+    qindex_delta = compute_deltaq(
+        cpi, cm->base_qindex,
+        VPXMIN(CR_MAX_RATE_TARGET_RATIO,
+               0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta));
+    cr->qindex_delta[2] = qindex_delta;
+    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
+
+    // Update the segmentation and refresh map.
+    cyclic_refresh_update_map(cpi);
+  }
+}
+
+int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
+  return cr->rdmult;
+}
+
+void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
+  memset(cr->map, 0, cm->mi_rows * cm->mi_cols);
+  cr->sb_index = 0;
+  cpi->refresh_golden_frame = 1;
+}
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
new file mode 100644
index 0000000..24491fc
--- /dev/null
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
+
+#include "av1/common/blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The segment ids used in cyclic refresh: from base (no boost) to increasing
+// boost (higher delta-qp).
+#define CR_SEGMENT_ID_BASE 0
+#define CR_SEGMENT_ID_BOOST1 1
+#define CR_SEGMENT_ID_BOOST2 2
+
+// Maximum rate target ratio for setting segment delta-qp.
+#define CR_MAX_RATE_TARGET_RATIO 4.0
+
+struct VP10_COMP;
+
+struct CYCLIC_REFRESH;
+typedef struct CYCLIC_REFRESH CYCLIC_REFRESH;
+
+CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols);
+
+void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
+
+// Estimate the bits, incorporating the delta-q from segment 1, after encoding
+// the frame.
+int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
+                                           double correction_factor);
+
+// Estimate the bits per mb, for a given q = i and a corresponding delta-q
+// (for segment 1), prior to encoding the frame.
+int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
+                                       double correction_factor);
+
+// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
+// check if we should reset the segment_id, and update the cyclic_refresh map
+// and segmentation map.
+void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
+                                        MB_MODE_INFO *const mbmi, int mi_row,
+                                        int mi_col, BLOCK_SIZE bsize,
+                                        int64_t rate, int64_t dist, int skip);
+
+// Update the segmentation map, and related quantities: cyclic refresh map,
+// refresh sb_index, and target number of blocks to be refreshed.
+void vp10_cyclic_refresh_update_map(struct VP10_COMP *const cpi);
+
+// Update the actual number of blocks that were applied the segment delta q.
+void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi);
+
+// Set golden frame update interval, for 1 pass CBR mode.
+void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi);
+
+// Check if we should not update golden reference, based on past refresh stats.
+void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi);
+
+// Set/update global/frame level refresh parameters.
+void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi);
+
+// Setup cyclic background refresh: set delta q and segmentation map.
+void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi);
+
+int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
+
+void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi);
+
+static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) {
+  return segment_id == CR_SEGMENT_ID_BOOST1 ||
+         segment_id == CR_SEGMENT_ID_BOOST2;
+}
+
+static INLINE int cyclic_refresh_segment_id(int segment_id) {
+  if (segment_id == CR_SEGMENT_ID_BOOST1)
+    return CR_SEGMENT_ID_BOOST1;
+  else if (segment_id == CR_SEGMENT_ID_BOOST2)
+    return CR_SEGMENT_ID_BOOST2;
+  else
+    return CR_SEGMENT_ID_BASE;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_AQ_CYCLICREFRESH_H_
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
new file mode 100644
index 0000000..2a529a1
--- /dev/null
+++ b/av1/encoder/aq_variance.c
@@ -0,0 +1,206 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "aom_ports/mem.h"
+
+#include "av1/encoder/aq_variance.h"
+
+#include "av1/common/seg_common.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/segmentation.h"
+#include "aom_ports/system_state.h"
+
+#define ENERGY_MIN (-4)
+#define ENERGY_MAX (1)
+#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
+#define ENERGY_IN_BOUNDS(energy) \
+  assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
+
+static const double rate_ratio[MAX_SEGMENTS] = { 2.5,  2.0, 1.5, 1.0,
+                                                 0.75, 1.0, 1.0, 1.0 };
+static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
+
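+// Map an energy value in [ENERGY_MIN, ENERGY_MAX] to a segment id.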
+#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
+
+DECLARE_ALIGNED(16, static const uint8_t, vp10_all_zeros[MAX_SB_SIZE]) = { 0 };
+#if CONFIG_VP9_HIGHBITDEPTH
+DECLARE_ALIGNED(16, static const uint16_t,
+                vp10_highbd_all_zeros[MAX_SB_SIZE]) = { 0 };
+#endif
+
+unsigned int vp10_vaq_segment_id(int energy) {
+  ENERGY_IN_BOUNDS(energy);
+  return SEGMENT_ID(energy);
+}
+
+void vp10_vaq_frame_setup(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+  struct segmentation *seg = &cm->seg;
+  int i;
+
+  if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
+      cpi->refresh_alt_ref_frame ||
+      (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
+    cpi->vaq_refresh = 1;
+
+    vp10_enable_segmentation(seg);
+    vp10_clearall_segfeatures(seg);
+
+    seg->abs_delta = SEGMENT_DELTADATA;
+
+    vpx_clear_system_state();
+
+    for (i = 0; i < MAX_SEGMENTS; ++i) {
+      int qindex_delta =
+          vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
+                                      rate_ratio[i], cm->bit_depth);
+
+      // We don't allow qindex 0 in a segment if the base value is not 0.
+      // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
+      // Q delta is sometimes applied without going back around the rd loop.
+      // This could lead to an illegal combination of partition size and q.
+      if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) {
+        qindex_delta = -cm->base_qindex + 1;
+      }
+
+      // No need to enable SEG_LVL_ALT_Q for this segment.
+      if (rate_ratio[i] == 1.0) {
+        continue;
+      }
+
+      vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
+      vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+    }
+  }
+}
+
+/* TODO(agrange, paulwilkins): block_variance() calls the unoptimized versions
+ * of variance() and highbd_8_variance(). It should not.
+ */
+static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+                        int b_stride, int w, int h, unsigned int *sse,
+                        int *sum) {
+  int i, j;
+
+  *sum = 0;
+  *sse = 0;
+
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j++) {
+      const int diff = a[j] - b[j];
+      *sum += diff;
+      *sse += diff * diff;
+    }
+
+    a += a_stride;
+    b += b_stride;
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
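+// Sum and sum of squared differences over a w x h high bitdepth block,
+// accumulated in 64 bits to avoid overflow with 10/12 bit inputs.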
+static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
+                                 const uint8_t *b8, int b_stride, int w, int h,
+                                 uint64_t *sse, uint64_t *sum) {
+  int i, j;
+
+  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
+  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
+  *sum = 0;
+  *sse = 0;
+
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j++) {
+      const int diff = a[j] - b[j];
+      *sum += diff;
+      *sse += diff * diff;
+    }
+    a += a_stride;
+    b += b_stride;
+  }
+}
+
+static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
+                                 const uint8_t *b8, int b_stride, int w, int h,
+                                 unsigned int *sse, int *sum) {
+  uint64_t sse_long = 0;
+  uint64_t sum_long = 0;
+  aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
+  *sse = (unsigned int)sse_long;
+  *sum = (int)sum_long;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
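+// Source-block variance, scaled by 256, measured against an all-zero
+// reference. Blocks overhanging the frame edge use only the visible area.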
+static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
+                                   BLOCK_SIZE bs) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  unsigned int var, sse;
+  int right_overflow =
+      (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
+  int bottom_overflow =
+      (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
+
+  if (right_overflow || bottom_overflow) {
+    const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
+    const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
+    int avg;
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
+                           CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0, bw, bh,
+                           &sse, &avg);
+      sse >>= 2 * (xd->bd - 8);
+      avg >>= (xd->bd - 8);
+    } else {
+      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros,
+                  0, bw, bh, &sse, &avg);
+    }
+#else
+    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_all_zeros, 0,
+                bw, bh, &sse, &avg);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    var = sse - (((int64_t)avg * avg) / (bw * bh));
+    return (256 * var) / (bw * bh);
+  } else {
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+                               CONVERT_TO_BYTEPTR(vp10_highbd_all_zeros), 0,
+                               &sse);
+    } else {
+      var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+                               vp10_all_zeros, 0, &sse);
+    }
+#else
+    var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+                             vp10_all_zeros, 0, &sse);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    return (256 * var) >> num_pels_log2_lookup[bs];
+  }
+}
+
+double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+  unsigned int var = block_variance(cpi, x, bs);
+  vpx_clear_system_state();
+  return log(var + 1.0);
+}
+
+#define DEFAULT_E_MIDPOINT 10.0
+int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+  double energy;
+  double energy_midpoint;
+  vpx_clear_system_state();
+  energy_midpoint =
+      (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
+  energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
+  return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
+}
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
new file mode 100644
index 0000000..a30a449
--- /dev/null
+++ b/av1/encoder/aq_variance.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_AQ_VARIANCE_H_
+#define VP10_ENCODER_AQ_VARIANCE_H_
+
+#include "av1/encoder/encoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+unsigned int vp10_vaq_segment_id(int energy);
+void vp10_vaq_frame_setup(VP10_COMP *cpi);
+
+int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_AQ_VARIANCE_H_
diff --git a/av1/encoder/arm/neon/dct_neon.c b/av1/encoder/arm/neon/dct_neon.c
new file mode 100644
index 0000000..1d77bec
--- /dev/null
+++ b/av1/encoder/arm/neon/dct_neon.c
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "av1/common/blockd.h"
+#include "aom_dsp/txfm_common.h"
+
+void vp10_fdct8x8_quant_neon(
+    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  int16_t temp_buffer[64];
+  (void)coeff_ptr;
+
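+  // Forward 8x8 DCT into a scratch buffer, then quantize the result.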
+  vpx_fdct8x8_neon(input, temp_buffer, stride);
+  vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+                        quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+                        dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+}
diff --git a/av1/encoder/arm/neon/error_neon.c b/av1/encoder/arm/neon/error_neon.c
new file mode 100644
index 0000000..34805d3
--- /dev/null
+++ b/av1/encoder/arm/neon/error_neon.c
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "./vp10_rtcd.h"
+
+int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+                                 int block_size) {
+  int64x2_t error = vdupq_n_s64(0);
+
+  assert(block_size >= 8);
+  assert((block_size % 8) == 0);
+
+  do {
+    const int16x8_t c = vld1q_s16(coeff);
+    const int16x8_t d = vld1q_s16(dqcoeff);
+    const int16x8_t diff = vsubq_s16(c, d);
+    const int16x4_t diff_lo = vget_low_s16(diff);
+    const int16x4_t diff_hi = vget_high_s16(diff);
+    // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before
+    // accumulating them in 64-bits.
+    const int32x4_t err0 = vmull_s16(diff_lo, diff_lo);
+    const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi);
+    const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1));
+    error = vaddq_s64(error, err2);
+    coeff += 8;
+    dqcoeff += 8;
+    block_size -= 8;
+  } while (block_size != 0);
+
+  return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1);
+}
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
new file mode 100644
index 0000000..db85b4d
--- /dev/null
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include <math.h>
+
+#include "aom_mem/vpx_mem.h"
+
+#include "av1/common/quant_common.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/rd.h"
+
+void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+                           int skip_block, const int16_t *zbin_ptr,
+                           const int16_t *round_ptr, const int16_t *quant_ptr,
+                           const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                           int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                           uint16_t *eob_ptr, const int16_t *scan,
+                           const int16_t *iscan) {
+  // TODO(jingning) Decide whether these arguments are needed after the
+  // quantization process is completed.
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)scan;
+
+  if (!skip_block) {
+    // Quantization pass: All coefficients with index >= zero_flag are
+    // skippable. Note: zero_flag can be zero.
+    int i;
+    const int16x8_t v_zero = vdupq_n_s16(0);
+    const int16x8_t v_one = vdupq_n_s16(1);
+    int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1);
+    int16x8_t v_round = vmovq_n_s16(round_ptr[1]);
+    int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]);
+    int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]);
+    // adjust for dc
+    v_round = vsetq_lane_s16(round_ptr[0], v_round, 0);
+    v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0);
+    v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0);
+    // process dc and the first seven ac coeffs
+    {
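+      // qcoeff = sign(coeff) * (((|coeff| + round) * quant) >> 16),
+      // dqcoeff = qcoeff * dequant; the largest scan index with a non-zero
+      // qcoeff is tracked as the eob.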
+      const int16x8_t v_iscan = vld1q_s16(&iscan[0]);
+      const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
+      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
+      const int32x4_t v_tmp_lo =
+          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+      const int32x4_t v_tmp_hi =
+          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+      const int16x8_t v_tmp2 =
+          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
+      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
+      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
+      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
+      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
+      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
+      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
+      vst1q_s16(&qcoeff_ptr[0], v_qcoeff);
+      vst1q_s16(&dqcoeff_ptr[0], v_dqcoeff);
+      v_round = vmovq_n_s16(round_ptr[1]);
+      v_quant = vmovq_n_s16(quant_ptr[1]);
+      v_dequant = vmovq_n_s16(dequant_ptr[1]);
+    }
+    // now process the rest of the ac coeffs
+    for (i = 8; i < count; i += 8) {
+      const int16x8_t v_iscan = vld1q_s16(&iscan[i]);
+      const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
+      const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
+      const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
+      const int32x4_t v_tmp_lo =
+          vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+      const int32x4_t v_tmp_hi =
+          vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+      const int16x8_t v_tmp2 =
+          vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
+      const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
+      const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
+      const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
+      const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
+      const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
+      const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+      v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
+      vst1q_s16(&qcoeff_ptr[i], v_qcoeff);
+      vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
+    }
+    {
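+      // Horizontal max across the 8 lanes gives the final eob.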
+      const int16x4_t v_eobmax_3210 = vmax_s16(
+          vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
+      const int64x1_t v_eobmax_xx32 =
+          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
+      const int16x4_t v_eobmax_tmp =
+          vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32));
+      const int64x1_t v_eobmax_xxx3 =
+          vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16);
+      const int16x4_t v_eobmax_final =
+          vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3));
+
+      *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0);
+    }
+  } else {
+    memset(qcoeff_ptr, 0, count * sizeof(int16_t));
+    memset(dqcoeff_ptr, 0, count * sizeof(int16_t));
+    *eob_ptr = 0;
+  }
+}
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
new file mode 100644
index 0000000..30699b4
--- /dev/null
+++ b/av1/encoder/bitstream.c
@@ -0,0 +1,3631 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "aom/vpx_encoder.h"
+#include "aom_dsp/bitwriter_buffer.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem_ops.h"
+#include "aom_ports/system_state.h"
+#include "aom_util/debug_util.h"
+
+#if CONFIG_CLPF
+#include "av1/common/clpf.h"
+#endif
+#if CONFIG_DERING
+#include "av1/common/dering.h"
+#endif  // CONFIG_DERING
+#include "av1/common/entropy.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/entropymv.h"
+#include "av1/common/mvref_common.h"
+#include "av1/common/pred_common.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/seg_common.h"
+#include "av1/common/tile_common.h"
+
+#if CONFIG_ANS
+#include "av1/encoder/buf_ans.h"
+#endif  // CONFIG_ANS
+#include "av1/encoder/bitstream.h"
+#include "av1/encoder/cost.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/subexp.h"
+#include "av1/encoder/tokenize.h"
+
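+// Each vp10_token below is a { bit pattern, length } pair for one symbol.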
+static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+  { 0, 1 },  { 6, 3 },   { 28, 5 },  { 30, 5 }, { 58, 6 },
+  { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
+};
+#if CONFIG_EXT_INTERP
+static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+    { { 0, 1 }, { 4, 3 }, { 6, 3 }, { 5, 3 }, { 7, 3 } };
+#else
+static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+    { { 0, 1 }, { 2, 2 }, { 3, 2 } };
+#endif  // CONFIG_EXT_INTERP
+#if CONFIG_EXT_PARTITION_TYPES
+static const struct vp10_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
+  { 0, 1 },  { 4, 3 },  { 12, 4 }, { 7, 3 },
+  { 10, 4 }, { 11, 4 }, { 26, 5 }, { 27, 5 }
+};
+#endif
+static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+  { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+#if !CONFIG_REF_MV
+static const struct vp10_token inter_mode_encodings[INTER_MODES] =
+#if CONFIG_EXT_INTER
+    { { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 } };
+#else
+    { { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 } };
+#endif  // CONFIG_EXT_INTER
+#endif
+#if CONFIG_EXT_INTER
+static const struct vp10_token
+    inter_compound_mode_encodings[INTER_COMPOUND_MODES] = {
+      { 2, 2 },  { 50, 6 }, { 51, 6 }, { 24, 5 }, { 52, 6 },
+      { 53, 6 }, { 54, 6 }, { 55, 6 }, { 0, 1 },  { 7, 3 }
+    };
+#endif  // CONFIG_EXT_INTER
+static const struct vp10_token palette_size_encodings[] = {
+  { 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 30, 5 }, { 62, 6 }, { 63, 6 },
+};
+static const struct vp10_token
+    palette_color_encodings[PALETTE_MAX_SIZE - 1][PALETTE_MAX_SIZE] = {
+      { { 0, 1 }, { 1, 1 } },                                  // 2 colors
+      { { 0, 1 }, { 2, 2 }, { 3, 2 } },                        // 3 colors
+      { { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } },              // 4 colors
+      { { 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 15, 4 } },  // 5 colors
+      { { 0, 1 },
+        { 2, 2 },
+        { 6, 3 },
+        { 14, 4 },
+        { 30, 5 },
+        { 31, 5 } },  // 6 colors
+      { { 0, 1 },
+        { 2, 2 },
+        { 6, 3 },
+        { 14, 4 },
+        { 30, 5 },
+        { 62, 6 },
+        { 63, 6 } },  // 7 colors
+      { { 0, 1 },
+        { 2, 2 },
+        { 6, 3 },
+        { 14, 4 },
+        { 30, 5 },
+        { 62, 6 },
+        { 126, 7 },
+        { 127, 7 } },  // 8 colors
+    };
+
+static const struct vp10_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
+  { { 0, 1 }, { 1, 1 } },                      // Max tx_size is 8X8
+  { { 0, 1 }, { 2, 2 }, { 3, 2 } },            // Max tx_size is 16X16
+  { { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } },  // Max tx_size is 32X32
+};
+
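+// Writes a value v in [0, n) using a truncated binary code: with
+// l = get_unsigned_bits(n) and m = (1 << l) - n, the first m values take
+// l - 1 bits and the remaining values take l bits.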
+static INLINE void write_uniform(vp10_writer *w, int n, int v) {
+  int l = get_unsigned_bits(n);
+  int m = (1 << l) - n;
+  if (l == 0) return;
+  if (v < m) {
+    vp10_write_literal(w, v, l - 1);
+  } else {
+    vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
+    vp10_write_literal(w, (v - m) & 1, 1);
+  }
+}
+
+#if CONFIG_EXT_TX
+static struct vp10_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
+static struct vp10_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
+#else
+static struct vp10_token ext_tx_encodings[TX_TYPES];
+#endif  // CONFIG_EXT_TX
+#if CONFIG_GLOBAL_MOTION
+static struct vp10_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
+#endif  // CONFIG_GLOBAL_MOTION
+#if CONFIG_EXT_INTRA
+static struct vp10_token intra_filter_encodings[INTRA_FILTERS];
+#endif  // CONFIG_EXT_INTRA
+#if CONFIG_EXT_INTER
+static struct vp10_token interintra_mode_encodings[INTERINTRA_MODES];
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+static struct vp10_token motvar_encodings[MOTION_VARIATIONS];
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
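+// Builds the variable-length token tables from the corresponding probability
+// trees; expected to be called once at encoder startup.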
+void vp10_encode_token_init(void) {
+#if CONFIG_EXT_TX
+  int s;
+  for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
+    vp10_tokens_from_tree(ext_tx_inter_encodings[s], vp10_ext_tx_inter_tree[s]);
+  }
+  for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
+    vp10_tokens_from_tree(ext_tx_intra_encodings[s], vp10_ext_tx_intra_tree[s]);
+  }
+#else
+  vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+#endif  // CONFIG_EXT_TX
+#if CONFIG_EXT_INTRA
+  vp10_tokens_from_tree(intra_filter_encodings, vp10_intra_filter_tree);
+#endif  // CONFIG_EXT_INTRA
+#if CONFIG_EXT_INTER
+  vp10_tokens_from_tree(interintra_mode_encodings, vp10_interintra_mode_tree);
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+  vp10_tokens_from_tree(motvar_encodings, vp10_motvar_tree);
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+#if CONFIG_GLOBAL_MOTION
+  vp10_tokens_from_tree(global_motion_types_encodings,
+                        vp10_global_motion_types_tree);
+#endif  // CONFIG_GLOBAL_MOTION
+}
+
+static void write_intra_mode(vp10_writer *w, PREDICTION_MODE mode,
+                             const vpx_prob *probs) {
+  vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+}
+
+#if CONFIG_EXT_INTER
+static void write_interintra_mode(vp10_writer *w, INTERINTRA_MODE mode,
+                                  const vpx_prob *probs) {
+  vp10_write_token(w, vp10_interintra_mode_tree, probs,
+                   &interintra_mode_encodings[mode]);
+}
+#endif  // CONFIG_EXT_INTER
+
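+// With CONFIG_REF_MV the inter mode is coded as a cascade of binary
+// decisions (NEWMV? then ZEROMV? then NEARESTMV vs NEARMV), each with a
+// context derived from mode_ctx; otherwise a single tree symbol is used.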
+static void write_inter_mode(VP10_COMMON *cm, vp10_writer *w,
+                             PREDICTION_MODE mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                             int is_compound,
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                             const int16_t mode_ctx) {
+#if CONFIG_REF_MV
+  const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
+  const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+#if CONFIG_EXT_INTER
+  vp10_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
+
+  if (!is_compound && (mode == NEWMV || mode == NEWFROMNEARMV))
+    vp10_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
+
+  if (mode != NEWMV && mode != NEWFROMNEARMV) {
+#else
+  vp10_write(w, mode != NEWMV, newmv_prob);
+
+  if (mode != NEWMV) {
+#endif  // CONFIG_EXT_INTER
+    const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+    const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+
+    if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
+      assert(mode == ZEROMV);
+      return;
+    }
+
+    vp10_write(w, mode != ZEROMV, zeromv_prob);
+
+    if (mode != ZEROMV) {
+      int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
+      vpx_prob refmv_prob;
+
+      if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
+      if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
+      if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
+
+      refmv_prob = cm->fc->refmv_prob[refmv_ctx];
+      vp10_write(w, mode != NEARESTMV, refmv_prob);
+    }
+  }
+#else
+  const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
+  assert(is_inter_mode(mode));
+  vp10_write_token(w, vp10_inter_mode_tree, inter_probs,
+                   &inter_mode_encodings[INTER_OFFSET(mode)]);
+#endif
+}
+
+#if CONFIG_REF_MV
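+// Signals which entry of the dynamic reference MV list (DRL) the block
+// uses, as a series of binary decisions gated by the available stack depth.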
+static void write_drl_idx(const VP10_COMMON *cm, const MB_MODE_INFO *mbmi,
+                          const MB_MODE_INFO_EXT *mbmi_ext, vp10_writer *w) {
+  uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+
+  assert(mbmi->ref_mv_idx < 3);
+
+  if (mbmi->mode == NEWMV) {
+    int idx;
+    for (idx = 0; idx < 2; ++idx) {
+      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
+        uint8_t drl_ctx =
+            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+
+        vp10_write(w, mbmi->ref_mv_idx != idx, drl_prob);
+        if (mbmi->ref_mv_idx == idx) return;
+      }
+    }
+    return;
+  }
+
+  if (mbmi->mode == NEARMV) {
+    int idx;
+    // TODO(jingning): Temporary solution to compensate the NEARESTMV offset.
+    for (idx = 1; idx < 3; ++idx) {
+      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
+        uint8_t drl_ctx =
+            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+        vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+
+        vp10_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
+        if (mbmi->ref_mv_idx == (idx - 1)) return;
+      }
+    }
+    return;
+  }
+}
+#endif
+
+#if CONFIG_EXT_INTER
+static void write_inter_compound_mode(VP10_COMMON *cm, vp10_writer *w,
+                                      PREDICTION_MODE mode,
+                                      const int16_t mode_ctx) {
+  const vpx_prob *const inter_compound_probs =
+      cm->fc->inter_compound_mode_probs[mode_ctx];
+
+  assert(is_inter_compound_mode(mode));
+  vp10_write_token(w, vp10_inter_compound_mode_tree, inter_compound_probs,
+                   &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
+}
+#endif  // CONFIG_EXT_INTER
+
+static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+                                int max) {
+  vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
+}
+
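+// Derives per-node branch counts from the leaf counts and conditionally
+// signals a probability update for each internal node of the tree.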
+static void prob_diff_update(const vpx_tree_index *tree,
+                             vpx_prob probs[/*n - 1*/],
+                             const unsigned int counts[/*n - 1*/], int n,
+                             vp10_writer *w) {
+  int i;
+  unsigned int branch_ct[32][2];
+
+  // Assuming max number of probabilities <= 32
+  assert(n <= 32);
+
+  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  for (i = 0; i < n - 1; ++i)
+    vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+}
+
+static int prob_diff_update_savings(const vpx_tree_index *tree,
+                                    vpx_prob probs[/*n - 1*/],
+                                    const unsigned int counts[/*n - 1*/],
+                                    int n) {
+  int i;
+  unsigned int branch_ct[32][2];
+  int savings = 0;
+
+  // Assuming max number of probabilities <= 32
+  assert(n <= 32);
+  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  for (i = 0; i < n - 1; ++i) {
+    savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+  }
+  return savings;
+}
+
+#if CONFIG_VAR_TX
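+// Writes the transform partition tree for a block: at each node a single
+// bit signals whether the current tx_size is used (0) or the node splits
+// into four children one size smaller (1).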
+static void write_tx_size_vartx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                                const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
+                                int blk_row, int blk_col, vp10_writer *w) {
+  const int tx_row = blk_row >> 1;
+  const int tx_col = blk_col >> 1;
+  int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  int ctx = txfm_partition_context(xd->above_txfm_context + tx_col,
+                                   xd->left_txfm_context + tx_row, tx_size);
+
+  if (xd->mb_to_bottom_edge < 0) max_blocks_high += xd->mb_to_bottom_edge >> 5;
+  if (xd->mb_to_right_edge < 0) max_blocks_wide += xd->mb_to_right_edge >> 5;
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
+    vp10_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
+    txfm_partition_update(xd->above_txfm_context + tx_col,
+                          xd->left_txfm_context + tx_row, tx_size);
+  } else {
+    const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+    vp10_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
+
+    if (tx_size == TX_8X8) {
+      txfm_partition_update(xd->above_txfm_context + tx_col,
+                            xd->left_txfm_context + tx_row, TX_4X4);
+      return;
+    }
+
+    assert(bsl > 0);
+    --bsl;
+    for (i = 0; i < 4; ++i) {
+      int offsetr = blk_row + ((i >> 1) << bsl);
+      int offsetc = blk_col + ((i & 0x01) << bsl);
+      write_tx_size_vartx(cm, xd, mbmi, tx_size - 1, offsetr, offsetc, w);
+    }
+  }
+}
+
+static void update_txfm_partition_probs(VP10_COMMON *cm, vp10_writer *w,
+                                        FRAME_COUNTS *counts) {
+  int k;
+  for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
+    vp10_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
+                               counts->txfm_partition[k]);
+}
+#endif
+
+static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                                   vp10_writer *w) {
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  // For sub8x8 blocks the tx_size symbol does not need to be sent
+  if (bsize >= BLOCK_8X8) {
+    const TX_SIZE tx_size = mbmi->tx_size;
+    const int is_inter = is_inter_block(mbmi);
+    const int tx_size_ctx = get_tx_size_context(xd);
+    const int tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
+                                     : intra_tx_size_cat_lookup[bsize];
+    const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(mbmi)));
+    assert(
+        IMPLIES(is_rect_tx(tx_size), tx_size == max_txsize_rect_lookup[bsize]));
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+
+    vp10_write_token(w, vp10_tx_size_tree[tx_size_cat],
+                     cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
+                     &tx_size_encodings[tx_size_cat][coded_tx_size]);
+  }
+}
+
+#if CONFIG_REF_MV
+static void update_inter_mode_probs(VP10_COMMON *cm, vp10_writer *w,
+                                    FRAME_COUNTS *counts) {
+  int i;
+  for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
+    vp10_cond_prob_diff_update(w, &cm->fc->newmv_prob[i],
+                               counts->newmv_mode[i]);
+  for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
+    vp10_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
+                               counts->zeromv_mode[i]);
+  for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
+    vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
+                               counts->refmv_mode[i]);
+  for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
+    vp10_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
+#if CONFIG_EXT_INTER
+  vp10_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
+#endif  // CONFIG_EXT_INTER
+}
+#endif
+
+#if CONFIG_EXT_INTER
+static void update_inter_compound_mode_probs(VP10_COMMON *cm, vp10_writer *w) {
+  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+  int i;
+  int savings = 0;
+  int do_update = 0;
+  for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
+    savings += prob_diff_update_savings(
+        vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+        cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES);
+  }
+  do_update = savings > savings_thresh;
+  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  if (do_update) {
+    for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
+      prob_diff_update(
+          vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+          cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, w);
+    }
+  }
+}
+#endif  // CONFIG_EXT_INTER
+
+static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                      int segment_id, const MODE_INFO *mi, vp10_writer *w) {
+  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
+    return 1;
+  } else {
+    const int skip = mi->mbmi.skip;
+    vp10_write(w, skip, vp10_get_skip_prob(cm, xd));
+    return skip;
+  }
+}
+
+static void update_skip_probs(VP10_COMMON *cm, vp10_writer *w,
+                              FRAME_COUNTS *counts) {
+  int k;
+
+  for (k = 0; k < SKIP_CONTEXTS; ++k)
+    vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+}
+
+static void update_switchable_interp_probs(VP10_COMMON *cm, vp10_writer *w,
+                                           FRAME_COUNTS *counts) {
+  int j;
+  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
+    prob_diff_update(vp10_switchable_interp_tree,
+                     cm->fc->switchable_interp_prob[j],
+                     counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
+}
+
+#if CONFIG_EXT_TX
+static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
+  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+  int i, j;
+  int s;
+  for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
+    int savings = 0;
+    int do_update = 0;
+    for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+      if (!use_inter_ext_tx_for_txsize[s][i]) continue;
+      savings += prob_diff_update_savings(
+          vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+          cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s]);
+    }
+    do_update = savings > savings_thresh;
+    vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+    if (do_update) {
+      for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+        if (!use_inter_ext_tx_for_txsize[s][i]) continue;
+        prob_diff_update(
+            vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+            cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s], w);
+      }
+    }
+  }
+
+  for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
+    int savings = 0;
+    int do_update = 0;
+    for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+      if (!use_intra_ext_tx_for_txsize[s][i]) continue;
+      for (j = 0; j < INTRA_MODES; ++j)
+        savings += prob_diff_update_savings(
+            vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+            cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s]);
+    }
+    do_update = savings > savings_thresh;
+    vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+    if (do_update) {
+      for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+        if (!use_intra_ext_tx_for_txsize[s][i]) continue;
+        for (j = 0; j < INTRA_MODES; ++j)
+          prob_diff_update(
+              vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+              cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s], w);
+      }
+    }
+  }
+}
+
+#else
+
+static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
+  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+  int i, j;
+
+  int savings = 0;
+  int do_update = 0;
+  for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+    for (j = 0; j < TX_TYPES; ++j)
+      savings += prob_diff_update_savings(
+          vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+          cm->counts.intra_ext_tx[i][j], TX_TYPES);
+  }
+  do_update = savings > savings_thresh;
+  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  if (do_update) {
+    for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+      for (j = 0; j < TX_TYPES; ++j)
+        prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+                         cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
+    }
+  }
+  savings = 0;
+  for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+    savings +=
+        prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+                                 cm->counts.inter_ext_tx[i], TX_TYPES);
+  }
+  do_update = savings > savings_thresh;
+  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  if (do_update) {
+    for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+      prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+                       cm->counts.inter_ext_tx[i], TX_TYPES, w);
+    }
+  }
+}
+#endif  // CONFIG_EXT_TX
+
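+// Writes `num` palette color-index tokens for an n-color palette using the
+// corresponding palette color tree.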
+static void pack_palette_tokens(vp10_writer *w, const TOKENEXTRA **tp, int n,
+                                int num) {
+  int i;
+  const TOKENEXTRA *p = *tp;
+
+  for (i = 0; i < num; ++i) {
+    vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
+                     &palette_color_encodings[n - 2][p->token]);
+    ++p;
+  }
+
+  *tp = p;
+}
+
+#if CONFIG_SUPERTX
+static void update_supertx_probs(VP10_COMMON *cm, vp10_writer *w) {
+  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+  int i, j;
+  int savings = 0;
+  int do_update = 0;
+  for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
+    for (j = 1; j < TX_SIZES; ++j) {
+      savings += vp10_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
+                                                    cm->counts.supertx[i][j]);
+    }
+  }
+  do_update = savings > savings_thresh;
+  vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+  if (do_update) {
+    for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
+      for (j = 1; j < TX_SIZES; ++j) {
+        vp10_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
+                                   cm->counts.supertx[i][j]);
+      }
+    }
+  }
+}
+#endif  // CONFIG_SUPERTX
+
+#if !CONFIG_ANS
+static void pack_mb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+                           const TOKENEXTRA *const stop,
+                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+  const TOKENEXTRA *p = *tp;
+#if CONFIG_VAR_TX
+  int count = 0;
+  const int seg_eob = get_tx2d_size(tx);
+#endif
+
+  while (p < stop && p->token != EOSB_TOKEN) {
+    const int t = p->token;
+    const struct vp10_token *const a = &vp10_coef_encodings[t];
+    int v = a->value;
+    int n = a->len;
+#if CONFIG_VP9_HIGHBITDEPTH
+    const vp10_extra_bit *b;
+    if (bit_depth == VPX_BITS_12)
+      b = &vp10_extra_bits_high12[t];
+    else if (bit_depth == VPX_BITS_10)
+      b = &vp10_extra_bits_high10[t];
+    else
+      b = &vp10_extra_bits[t];
+#else
+    const vp10_extra_bit *const b = &vp10_extra_bits[t];
+    (void)bit_depth;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    /* skip one or two nodes */
+    if (p->skip_eob_node)
+      n -= p->skip_eob_node;
+    else
+      vp10_write(w, t != EOB_TOKEN, p->context_tree[0]);
+
+    if (t != EOB_TOKEN) {
+      vp10_write(w, t != ZERO_TOKEN, p->context_tree[1]);
+
+      if (t != ZERO_TOKEN) {
+        vp10_write(w, t != ONE_TOKEN, p->context_tree[2]);
+
+        if (t != ONE_TOKEN) {
+          int len = UNCONSTRAINED_NODES - p->skip_eob_node;
+          vp10_write_tree(w, vp10_coef_con_tree,
+                          vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+                          n - len, 0);
+        }
+      }
+    }
+
+    if (b->base_val) {
+      const int e = p->extra, l = b->len;
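+      // For CAT6 tokens the number of meaningful extra bits shrinks with the
+      // transform size, so the guaranteed-zero leading bits are skipped
+      // rather than coded (the assert below checks they really are zero).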
+      int skip_bits = (b->base_val == CAT6_MIN_VAL)
+                          ? TX_SIZES - 1 - txsize_sqr_up_map[tx]
+                          : 0;
+
+      if (l) {
+        const unsigned char *pb = b->prob;
+        int v = e >> 1;
+        int n = l; /* number of bits in v, assumed nonzero */
+        int i = 0;
+
+        do {
+          const int bb = (v >> --n) & 1;
+          if (skip_bits) {
+            skip_bits--;
+            assert(!bb);
+          } else {
+            vp10_write(w, bb, pb[i >> 1]);
+          }
+          i = b->tree[i + bb];
+        } while (n);
+      }
+
+      vp10_write_bit(w, e & 1);
+    }
+    ++p;
+
+#if CONFIG_VAR_TX
+    ++count;
+    if (t == EOB_TOKEN || count == seg_eob) break;
+#endif
+  }
+
+  *tp = p;
+}
+#else
+// This function serializes the tokens in forward order using a buffered ANS
+// coder.
+static void pack_mb_tokens(struct BufAnsCoder *ans, const TOKENEXTRA **tp,
+                           const TOKENEXTRA *const stop,
+                           vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+  const TOKENEXTRA *p = *tp;
+#if CONFIG_VAR_TX
+  int count = 0;
+  const int seg_eob = 16 << (tx << 1);
+#endif  // CONFIG_VAR_TX
+
+  while (p < stop && p->token != EOSB_TOKEN) {
+    const int t = p->token;
+#if CONFIG_VP9_HIGHBITDEPTH
+    const vp10_extra_bit *b;
+    if (bit_depth == VPX_BITS_12)
+      b = &vp10_extra_bits_high12[t];
+    else if (bit_depth == VPX_BITS_10)
+      b = &vp10_extra_bits_high10[t];
+    else
+      b = &vp10_extra_bits[t];
+#else
+    const vp10_extra_bit *const b = &vp10_extra_bits[t];
+    (void)bit_depth;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    /* skip one or two nodes */
+    if (!p->skip_eob_node)
+      buf_uabs_write(ans, t != EOB_TOKEN, p->context_tree[0]);
+
+    if (t != EOB_TOKEN) {
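+      // Map the token to a rANS symbol: its cumulative probability and
+      // probability mass are read directly from the per-token CDF.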
+      struct rans_sym s;
+      const rans_dec_lut *token_cdf = p->token_cdf;
+      assert(token_cdf);
+      s.cum_prob = (*token_cdf)[t - ZERO_TOKEN];
+      s.prob = (*token_cdf)[t - ZERO_TOKEN + 1] - s.cum_prob;
+      buf_rans_write(ans, &s);
+
+      if (b->base_val) {
+        const int e = p->extra, l = b->len;
+        int skip_bits = (b->base_val == CAT6_MIN_VAL)
+                            ? TX_SIZES - 1 - txsize_sqr_up_map[tx]
+                            : 0;
+
+        if (l) {
+          const unsigned char *pb = b->prob;
+          int v = e >> 1;
+          int n = l; /* number of bits in v, assumed nonzero */
+          int i = 0;
+
+          do {
+            const int bb = (v >> --n) & 1;
+            if (skip_bits) {
+              skip_bits--;
+              assert(!bb);
+            } else {
+              buf_uabs_write(ans, bb, pb[i >> 1]);
+            }
+            i = b->tree[i + bb];
+          } while (n);
+        }
+
+        buf_uabs_write(ans, e & 1, 128);
+      }
+    }
+    ++p;
+
+#if CONFIG_VAR_TX
+    ++count;
+    if (t == EOB_TOKEN || count == seg_eob) break;
+#endif  // CONFIG_VAR_TX
+  }
+
+  *tp = p;
+}
+#endif  // !CONFIG_ANS
+
+#if CONFIG_VAR_TX
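+// Recursively walks the same transform partition tree as
+// write_tx_size_vartx and emits the coefficient tokens of each leaf
+// transform block via pack_mb_tokens.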
+static void pack_txb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+                            const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
+                            MB_MODE_INFO *mbmi, int plane,
+                            BLOCK_SIZE plane_bsize, vpx_bit_depth_t bit_depth,
+                            int block, int blk_row, int blk_col,
+                            TX_SIZE tx_size) {
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
+  TX_SIZE plane_tx_size;
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  plane_tx_size = plane ? get_uv_tx_size_impl(
+                              mbmi->inter_tx_size[tx_row][tx_col], bsize, 0, 0)
+                        : mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (tx_size == plane_tx_size) {
+    pack_mb_tokens(w, tp, tok_end, bit_depth, tx_size);
+  } else {
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+
+    assert(bsl > 0);
+    --bsl;
+
+    for (i = 0; i < 4; ++i) {
+      const int offsetr = blk_row + ((i >> 1) << bsl);
+      const int offsetc = blk_col + ((i & 0x01) << bsl);
+      int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      pack_txb_tokens(w, tp, tok_end, xd, mbmi, plane, plane_bsize, bit_depth,
+                      block + i * step, offsetr, offsetc, tx_size - 1);
+    }
+  }
+}
+#endif
+
+static void write_segment_id(vp10_writer *w, const struct segmentation *seg,
+                             const struct segmentation_probs *segp,
+                             int segment_id) {
+  if (seg->enabled && seg->update_map)
+    vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+}
+
+// This function encodes the reference frame
+static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                             vp10_writer *w) {
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const int is_compound = has_second_ref(mbmi);
+  const int segment_id = mbmi->segment_id;
+
+  // If segment-level coding of the reference frame is active, the reference
+  // is fully implied by the segment id; otherwise it is signalled below.
+  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
+    assert(!is_compound);
+    assert(mbmi->ref_frame[0] ==
+           get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
+  } else {
+    // Signal whether the block uses compound prediction, unless the
+    // reference mode is already fixed at the frame level.
+    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+      vp10_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+    } else {
+      assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
+    }
+
+    if (is_compound) {
+#if CONFIG_EXT_REFS
+      const int bit = (mbmi->ref_frame[0] == GOLDEN_FRAME ||
+                       mbmi->ref_frame[0] == LAST3_FRAME);
+      const int bit_bwd = mbmi->ref_frame[1] == ALTREF_FRAME;
+#else  // CONFIG_EXT_REFS
+      const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
+#endif  // CONFIG_EXT_REFS
+
+      vp10_write(w, bit, vp10_get_pred_prob_comp_ref_p(cm, xd));
+
+#if CONFIG_EXT_REFS
+      if (!bit) {
+        const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
+        vp10_write(w, bit1, vp10_get_pred_prob_comp_ref_p1(cm, xd));
+      } else {
+        const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
+        vp10_write(w, bit2, vp10_get_pred_prob_comp_ref_p2(cm, xd));
+      }
+      vp10_write(w, bit_bwd, vp10_get_pred_prob_comp_bwdref_p(cm, xd));
+#endif  // CONFIG_EXT_REFS
+    } else {
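+      // Single reference: identify the frame with a short binary tree
+      // (up to three bits with CONFIG_EXT_REFS, up to two otherwise).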
+#if CONFIG_EXT_REFS
+      const int bit0 = (mbmi->ref_frame[0] == ALTREF_FRAME ||
+                        mbmi->ref_frame[0] == BWDREF_FRAME);
+      vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+
+      if (bit0) {
+        const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
+        vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+      } else {
+        const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
+                          mbmi->ref_frame[0] == GOLDEN_FRAME);
+        vp10_write(w, bit2, vp10_get_pred_prob_single_ref_p3(cm, xd));
+
+        if (!bit2) {
+          const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
+          vp10_write(w, bit3, vp10_get_pred_prob_single_ref_p4(cm, xd));
+        } else {
+          const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
+          vp10_write(w, bit4, vp10_get_pred_prob_single_ref_p5(cm, xd));
+        }
+      }
+#else   // CONFIG_EXT_REFS
+      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
+      vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+
+      if (bit0) {
+        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
+        vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+      }
+#endif  // CONFIG_EXT_REFS
+    }
+  }
+}
+
+#if CONFIG_EXT_INTRA
+static void write_ext_intra_mode_info(const VP10_COMMON *const cm,
+                                      const MB_MODE_INFO *const mbmi,
+                                      vp10_writer *w) {
+#if !ALLOW_FILTER_INTRA_MODES
+  return;
+#endif
+  if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
+    vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
+               cm->fc->ext_intra_probs[0]);
+    if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
+      EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[0];
+      write_uniform(w, FILTER_INTRA_MODES, mode);
+    }
+  }
+
+  if (mbmi->uv_mode == DC_PRED &&
+      mbmi->palette_mode_info.palette_size[1] == 0) {
+    vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
+               cm->fc->ext_intra_probs[1]);
+    if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
+      EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[1];
+      write_uniform(w, FILTER_INTRA_MODES, mode);
+    }
+  }
+}
+
+static void write_intra_angle_info(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                                   vp10_writer *w) {
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  int p_angle;
+
+  if (bsize < BLOCK_8X8) return;
+
+  if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED) {
+    write_uniform(w, 2 * MAX_ANGLE_DELTAS + 1,
+                  MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
+    p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+    if (vp10_is_intra_filter_switchable(p_angle)) {
+      vp10_write_token(w, vp10_intra_filter_tree,
+                       cm->fc->intra_filter_probs[intra_filter_ctx],
+                       &intra_filter_encodings[mbmi->intra_filter]);
+    }
+  }
+
+  if (mbmi->uv_mode != DC_PRED && mbmi->uv_mode != TM_PRED) {
+    write_uniform(w, 2 * MAX_ANGLE_DELTAS + 1,
+                  MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
+  }
+}
+#endif  // CONFIG_EXT_INTRA
+
+static void write_switchable_interp_filter(VP10_COMP *cpi,
+                                           const MACROBLOCKD *xd,
+                                           vp10_writer *w) {
+  VP10_COMMON *const cm = &cpi->common;
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+#if CONFIG_DUAL_FILTER
+  int dir;
+#endif
+  if (cm->interp_filter == SWITCHABLE) {
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+    if (!vp10_is_interp_needed(xd)) {
+      assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
+      return;
+    }
+#else
+    if (!vp10_is_interp_needed(xd)) {
+      assert(mbmi->interp_filter == EIGHTTAP_REGULAR);
+      return;
+    }
+#endif  // CONFIG_DUAL_FILTER
+#endif  // CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+    for (dir = 0; dir < 2; ++dir) {
+      if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
+          (mbmi->ref_frame[1] > INTRA_FRAME &&
+           has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
+        const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+        vp10_write_token(
+            w, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx],
+            &switchable_interp_encodings[mbmi->interp_filter[dir]]);
+        ++cpi->interp_filter_selected[0][mbmi->interp_filter[dir]];
+      }
+    }
+#else
+    {
+      const int ctx = vp10_get_pred_context_switchable_interp(xd);
+      vp10_write_token(w, vp10_switchable_interp_tree,
+                       cm->fc->switchable_interp_prob[ctx],
+                       &switchable_interp_encodings[mbmi->interp_filter]);
+      ++cpi->interp_filter_selected[0][mbmi->interp_filter];
+    }
+#endif
+  }
+}
+
+static void write_palette_mode_info(const VP10_COMMON *cm,
+                                    const MACROBLOCKD *xd,
+                                    const MODE_INFO *const mi, vp10_writer *w) {
+  const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MODE_INFO *const above_mi = xd->above_mi;
+  const MODE_INFO *const left_mi = xd->left_mi;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+  int palette_ctx = 0;
+  int n, i;
+
+  if (mbmi->mode == DC_PRED) {
+    n = pmi->palette_size[0];
+    if (above_mi)
+      palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+    if (left_mi)
+      palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+    vp10_write(
+        w, n > 0,
+        vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
+    if (n > 0) {
+      vp10_write_token(w, vp10_palette_size_tree,
+                       vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
+                       &palette_size_encodings[n - 2]);
+      for (i = 0; i < n; ++i)
+        vp10_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
+      write_uniform(w, n, pmi->palette_first_color_idx[0]);
+    }
+  }
+
+  if (mbmi->uv_mode == DC_PRED) {
+    n = pmi->palette_size[1];
+    vp10_write(w, n > 0,
+               vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
+    if (n > 0) {
+      vp10_write_token(w, vp10_palette_size_tree,
+                       vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8],
+                       &palette_size_encodings[n - 2]);
+      for (i = 0; i < n; ++i) {
+        vp10_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
+                           cm->bit_depth);
+        vp10_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
+                           cm->bit_depth);
+      }
+      write_uniform(w, n, pmi->palette_first_color_idx[1]);
+    }
+  }
+}
+
+static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+#if CONFIG_SUPERTX
+                                int supertx_enabled,
+#endif
+                                vp10_writer *w) {
+  VP10_COMMON *const cm = &cpi->common;
+#if !CONFIG_REF_MV
+  const nmv_context *nmvc = &cm->fc->nmvc;
+#endif
+  const MACROBLOCK *x = &cpi->td.mb;
+  const MACROBLOCKD *xd = &x->e_mbd;
+  const struct segmentation *const seg = &cm->seg;
+  const struct segmentation_probs *const segp = &cm->fc->seg;
+  const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+  const PREDICTION_MODE mode = mbmi->mode;
+  const int segment_id = mbmi->segment_id;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const int allow_hp = cm->allow_high_precision_mv;
+  const int is_inter = is_inter_block(mbmi);
+  const int is_compound = has_second_ref(mbmi);
+  int skip, ref;
+
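+  // Code the segment id first. With temporal updates a single predicted
+  // flag is sent, and the id itself only when the prediction fails.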
+  if (seg->update_map) {
+    if (seg->temporal_update) {
+      const int pred_flag = mbmi->seg_id_predicted;
+      vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
+      vp10_write(w, pred_flag, pred_prob);
+      if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
+    } else {
+      write_segment_id(w, seg, segp, segment_id);
+    }
+  }
+
+#if CONFIG_SUPERTX
+  if (supertx_enabled)
+    skip = mbmi->skip;
+  else
+    skip = write_skip(cm, xd, segment_id, mi, w);
+#else
+  skip = write_skip(cm, xd, segment_id, mi, w);
+#endif  // CONFIG_SUPERTX
+
+#if CONFIG_SUPERTX
+  if (!supertx_enabled)
+#endif  // CONFIG_SUPERTX
+    if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
+      vp10_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+
+  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
+#if CONFIG_SUPERTX
+      !supertx_enabled &&
+#endif  // CONFIG_SUPERTX
+      !(is_inter && skip) && !xd->lossless[segment_id]) {
+#if CONFIG_VAR_TX
+    if (is_inter) {  // This implies skip flag is 0.
+      const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+      const int txb_size = txsize_to_bsize[max_tx_size];
+      const int bs = num_4x4_blocks_wide_lookup[txb_size];
+      const int width = num_4x4_blocks_wide_lookup[bsize];
+      const int height = num_4x4_blocks_high_lookup[bsize];
+      int idx, idy;
+      for (idy = 0; idy < height; idy += bs)
+        for (idx = 0; idx < width; idx += bs)
+          write_tx_size_vartx(cm, xd, mbmi, max_tx_size, idy, idx, w);
+    } else {
+      set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, xd);
+      write_selected_tx_size(cm, xd, w);
+    }
+  } else {
+    set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, xd);
+#else
+    write_selected_tx_size(cm, xd, w);
+#endif
+  }
+
+  if (!is_inter) {
+    if (bsize >= BLOCK_8X8) {
+      write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
+    } else {
+      int idx, idy;
+      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+      for (idy = 0; idy < 2; idy += num_4x4_h) {
+        for (idx = 0; idx < 2; idx += num_4x4_w) {
+          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
+          write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
+        }
+      }
+    }
+    write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
+#if CONFIG_EXT_INTRA
+    write_intra_angle_info(cm, xd, w);
+#endif  // CONFIG_EXT_INTRA
+    if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
+      write_palette_mode_info(cm, xd, mi, w);
+#if CONFIG_EXT_INTRA
+    if (bsize >= BLOCK_8X8) write_ext_intra_mode_info(cm, mbmi, w);
+#endif  // CONFIG_EXT_INTRA
+  } else {
+    int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
+    write_ref_frames(cm, xd, w);
+
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+    if (is_compound)
+      mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
+    else
+#endif  // CONFIG_EXT_INTER
+      mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                            mbmi->ref_frame, bsize, -1);
+#endif
+
+    // If segment skip is not enabled code the mode.
+    if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
+      if (bsize >= BLOCK_8X8) {
+#if CONFIG_EXT_INTER
+        if (is_inter_compound_mode(mode))
+          write_inter_compound_mode(cm, w, mode, mode_ctx);
+        else if (is_inter_singleref_mode(mode))
+#endif  // CONFIG_EXT_INTER
+          write_inter_mode(cm, w, mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                           is_compound,
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                           mode_ctx);
+
+#if CONFIG_REF_MV
+        if (mode == NEARMV || mode == NEWMV)
+          write_drl_idx(cm, mbmi, mbmi_ext, w);
+#endif
+      }
+    }
+
+#if !CONFIG_EXT_INTERP && !CONFIG_DUAL_FILTER
+    write_switchable_interp_filter(cpi, xd, w);
+#endif  // !CONFIG_EXT_INTERP && !CONFIG_DUAL_FILTER
+
+    if (bsize < BLOCK_8X8) {
+      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+      int idx, idy;
+      for (idy = 0; idy < 2; idy += num_4x4_h) {
+        for (idx = 0; idx < 2; idx += num_4x4_w) {
+          const int j = idy * 2 + idx;
+          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+          if (!is_compound)
+#endif  // CONFIG_EXT_INTER
+            mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                                  mbmi->ref_frame, bsize, j);
+#endif
+#if CONFIG_EXT_INTER
+          if (is_inter_compound_mode(b_mode))
+            write_inter_compound_mode(cm, w, b_mode, mode_ctx);
+          else if (is_inter_singleref_mode(b_mode))
+#endif  // CONFIG_EXT_INTER
+            write_inter_mode(cm, w, b_mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                             has_second_ref(mbmi),
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                             mode_ctx);
+
+#if CONFIG_EXT_INTER
+          if (b_mode == NEWMV || b_mode == NEWFROMNEARMV ||
+              b_mode == NEW_NEWMV) {
+#else
+          if (b_mode == NEWMV) {
+#endif  // CONFIG_EXT_INTER
+            for (ref = 0; ref < 1 + is_compound; ++ref) {
+#if CONFIG_REF_MV
+              int nmv_ctx =
+                  vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+              const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+              vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+#if CONFIG_EXT_INTER
+                             &mi->bmi[j].ref_mv[ref].as_mv,
+#if CONFIG_REF_MV
+                             is_compound,
+#endif
+#else
+#if CONFIG_REF_MV
+                             &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
+#else
+                             &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+#endif  // CONFIG_REF_MV
+#endif  // CONFIG_EXT_INTER
+                             nmvc, allow_hp);
+            }
+          }
+#if CONFIG_EXT_INTER
+          else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+            vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
+                           &mi->bmi[j].ref_mv[1].as_mv,
+#if CONFIG_REF_MV
+                           is_compound,
+#endif
+                           nmvc, allow_hp);
+          } else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+            vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
+                           &mi->bmi[j].ref_mv[0].as_mv,
+#if CONFIG_REF_MV
+                           is_compound,
+#endif
+                           nmvc, allow_hp);
+          }
+#endif  // CONFIG_EXT_INTER
+        }
+      }
+    } else {
+#if CONFIG_EXT_INTER
+      if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
+#else
+      if (mode == NEWMV) {
+#endif  // CONFIG_EXT_INTER
+        int_mv ref_mv;
+        for (ref = 0; ref < 1 + is_compound; ++ref) {
+#if CONFIG_REF_MV
+          int nmv_ctx =
+              vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                           mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+          const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+          ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
+#if CONFIG_EXT_INTER
+          if (mode == NEWFROMNEARMV)
+            vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+                           &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
+#if CONFIG_REF_MV
+                           is_compound,
+#endif
+                           nmvc, allow_hp);
+          else
+#endif  // CONFIG_EXT_INTER
+            vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
+#if CONFIG_REF_MV
+                           is_compound,
+#endif
+                           nmvc, allow_hp);
+        }
+#if CONFIG_EXT_INTER
+      } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
+#if CONFIG_REF_MV
+        int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                                   mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+        const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+        vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
+                       &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
+#if CONFIG_REF_MV
+                       is_compound,
+#endif
+                       nmvc, allow_hp);
+      } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
+#if CONFIG_REF_MV
+        int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                                   mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+        const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
+        vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
+                       &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
+#if CONFIG_REF_MV
+                       is_compound,
+#endif
+                       nmvc, allow_hp);
+#endif  // CONFIG_EXT_INTER
+      }
+    }
+
+#if CONFIG_EXT_INTER
+    if (cpi->common.reference_mode != COMPOUND_REFERENCE &&
+#if CONFIG_SUPERTX
+        !supertx_enabled &&
+#endif  // CONFIG_SUPERTX
+        is_interintra_allowed(mbmi)) {
+      const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
+      const int bsize_group = size_group_lookup[bsize];
+      vp10_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
+      if (interintra) {
+        write_interintra_mode(w, mbmi->interintra_mode,
+                              cm->fc->interintra_mode_prob[bsize_group]);
+        if (is_interintra_wedge_used(bsize)) {
+          vp10_write(w, mbmi->use_wedge_interintra,
+                     cm->fc->wedge_interintra_prob[bsize]);
+          if (mbmi->use_wedge_interintra) {
+            vp10_write_literal(w, mbmi->interintra_wedge_index,
+                               get_wedge_bits_lookup(bsize));
+            assert(mbmi->interintra_wedge_sign == 0);
+          }
+        }
+      }
+    }
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+#if CONFIG_SUPERTX
+    if (!supertx_enabled)
+#endif  // CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+      if (mbmi->ref_frame[1] != INTRA_FRAME)
+#endif  // CONFIG_EXT_INTER
+        if (is_motvar_allowed(mbmi)) {
+          // TODO(debargha): Might want to only emit this if SEG_LVL_SKIP
+          // is not active, and assume SIMPLE_TRANSLATION in the decoder if
+          // it is active.
+          assert(mbmi->motion_variation < MOTION_VARIATIONS);
+          vp10_write_token(w, vp10_motvar_tree, cm->fc->motvar_prob[bsize],
+                           &motvar_encodings[mbmi->motion_variation]);
+        }
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
+#if CONFIG_EXT_INTER
+    if (cpi->common.reference_mode != SINGLE_REFERENCE &&
+        is_inter_compound_mode(mbmi->mode) &&
+#if CONFIG_OBMC
+        !(is_motvar_allowed(mbmi) &&
+          mbmi->motion_variation != SIMPLE_TRANSLATION) &&
+#endif  // CONFIG_OBMC
+        is_interinter_wedge_used(bsize)) {
+      vp10_write(w, mbmi->use_wedge_interinter,
+                 cm->fc->wedge_interinter_prob[bsize]);
+      if (mbmi->use_wedge_interinter) {
+        vp10_write_literal(w, mbmi->interinter_wedge_index,
+                           get_wedge_bits_lookup(bsize));
+        vp10_write_bit(w, mbmi->interinter_wedge_sign);
+      }
+    }
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_EXT_INTERP || CONFIG_DUAL_FILTER
+    write_switchable_interp_filter(cpi, xd, w);
+#endif  // CONFIG_EXT_INTERP || CONFIG_DUAL_FILTER
+  }
+
+  if (!FIXED_TX_TYPE) {
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(mbmi->tx_size, bsize, is_inter) > 1 &&
+        cm->base_qindex > 0 && !mbmi->skip &&
+#if CONFIG_SUPERTX
+        !supertx_enabled &&
+#endif  // CONFIG_SUPERTX
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      int eset = get_ext_tx_set(mbmi->tx_size, bsize, is_inter);
+      if (is_inter) {
+        assert(ext_tx_used_inter[eset][mbmi->tx_type]);
+        if (eset > 0)
+          vp10_write_token(
+              w, vp10_ext_tx_inter_tree[eset],
+              cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
+              &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+      } else if (ALLOW_INTRA_EXT_TX) {
+        if (eset > 0)
+          vp10_write_token(
+              w, vp10_ext_tx_intra_tree[eset],
+              cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
+              &ext_tx_intra_encodings[eset][mbmi->tx_type]);
+      }
+    }
+#else
+    if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
+#if CONFIG_SUPERTX
+        !supertx_enabled &&
+#endif  // CONFIG_SUPERTX
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      if (is_inter) {
+        vp10_write_token(w, vp10_ext_tx_tree,
+                         cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+                         &ext_tx_encodings[mbmi->tx_type]);
+      } else {
+        vp10_write_token(
+            w, vp10_ext_tx_tree,
+            cm->fc->intra_ext_tx_prob
+                [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]],
+            &ext_tx_encodings[mbmi->tx_type]);
+      }
+    } else {
+      if (!mbmi->skip) {
+#if CONFIG_SUPERTX
+        if (!supertx_enabled)
+#endif  // CONFIG_SUPERTX
+          assert(mbmi->tx_type == DCT_DCT);
+      }
+    }
+#endif  // CONFIG_EXT_TX
+  }
+}
+
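+// Writes the mode info for a block of an intra-only (key) frame: segment
+// id, skip flag, transform size, and the intra prediction modes for the
+// luma and chroma planes.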
+static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+                              MODE_INFO **mi_8x8, vp10_writer *w) {
+  const struct segmentation *const seg = &cm->seg;
+  const struct segmentation_probs *const segp = &cm->fc->seg;
+  const MODE_INFO *const mi = mi_8x8[0];
+  const MODE_INFO *const above_mi = xd->above_mi;
+  const MODE_INFO *const left_mi = xd->left_mi;
+  const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+
+  if (seg->update_map) write_segment_id(w, seg, segp, mbmi->segment_id);
+
+  write_skip(cm, xd, mbmi->segment_id, mi, w);
+
+  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
+      !xd->lossless[mbmi->segment_id])
+    write_selected_tx_size(cm, xd, w);
+
+  if (bsize >= BLOCK_8X8) {
+    write_intra_mode(w, mbmi->mode,
+                     get_y_mode_probs(cm, mi, above_mi, left_mi, 0));
+  } else {
+    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+    int idx, idy;
+
+    for (idy = 0; idy < 2; idy += num_4x4_h) {
+      for (idx = 0; idx < 2; idx += num_4x4_w) {
+        const int block = idy * 2 + idx;
+        write_intra_mode(w, mi->bmi[block].as_mode,
+                         get_y_mode_probs(cm, mi, above_mi, left_mi, block));
+      }
+    }
+  }
+
+  write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
+#if CONFIG_EXT_INTRA
+  write_intra_angle_info(cm, xd, w);
+#endif  // CONFIG_EXT_INTRA
+  if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
+    write_palette_mode_info(cm, xd, mi, w);
+#if CONFIG_EXT_INTRA
+  if (bsize >= BLOCK_8X8) write_ext_intra_mode_info(cm, mbmi, w);
+#endif  // CONFIG_EXT_INTRA
+
+  if (!FIXED_TX_TYPE) {
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(mbmi->tx_size, bsize, 0) > 1 && cm->base_qindex > 0 &&
+        !mbmi->skip &&
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) &&
+        ALLOW_INTRA_EXT_TX) {
+      int eset = get_ext_tx_set(mbmi->tx_size, bsize, 0);
+      if (eset > 0)
+        vp10_write_token(
+            w, vp10_ext_tx_intra_tree[eset],
+            cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
+            &ext_tx_intra_encodings[eset][mbmi->tx_type]);
+    }
+#else
+    if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      vp10_write_token(
+          w, vp10_ext_tx_tree,
+          cm->fc->intra_ext_tx_prob[mbmi->tx_size]
+                                   [intra_mode_to_tx_type_context[mbmi->mode]],
+          &ext_tx_encodings[mbmi->tx_type]);
+    }
+#endif  // CONFIG_EXT_TX
+  }
+}
+
+#if CONFIG_SUPERTX
+#define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
+                              mi_row, mi_col)                              \
+  write_modes_b(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col)
+#else
+#define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
+                              mi_row, mi_col)                              \
+  write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
+#endif  // CONFIG_SUPERTX
+
+static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
+                          vp10_writer *w, const TOKENEXTRA **tok,
+                          const TOKENEXTRA *const tok_end,
+#if CONFIG_SUPERTX
+                          int supertx_enabled,
+#endif
+                          int mi_row, int mi_col) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+  MODE_INFO *m;
+  int plane;
+  int bh, bw;
+#if CONFIG_ANS
+  (void)tok;
+  (void)tok_end;
+  (void)plane;
+#endif  // CONFIG_ANS
+
+  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
+  m = xd->mi[0];
+
+  assert(m->mbmi.sb_type <= cm->sb_size);
+
+  bh = num_8x8_blocks_high_lookup[m->mbmi.sb_type];
+  bw = num_8x8_blocks_wide_lookup[m->mbmi.sb_type];
+
+  cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
+
+  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+  if (frame_is_intra_only(cm)) {
+    write_mb_modes_kf(cm, xd, xd->mi, w);
+  } else {
+#if CONFIG_VAR_TX
+    xd->above_txfm_context = cm->above_txfm_context + mi_col;
+    xd->left_txfm_context =
+        xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+#endif
+#if CONFIG_EXT_INTERP
+    // vp10_is_interp_needed needs the ref frame buffers set up to look
+    // up if they are scaled. vp10_is_interp_needed is in turn needed by
+    // write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
+    set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
+#endif  // CONFIG_EXT_INTERP
+#if 0
+    // NOTE(zoeliu): For debug
+    if (cm->current_video_frame == FRAME_TO_CHECK && cm->show_frame == 1) {
+      const PREDICTION_MODE mode = m->mbmi.mode;
+      const int segment_id = m->mbmi.segment_id;
+      const BLOCK_SIZE bsize = m->mbmi.sb_type;
+
+      // For sub8x8, simply dump out the first sub8x8 block info
+      const PREDICTION_MODE b_mode =
+          (bsize < BLOCK_8X8) ? m->bmi[0].as_mode : -1;
+      const int mv_x = (bsize < BLOCK_8X8) ?
+          m->bmi[0].as_mv[0].as_mv.row : m->mbmi.mv[0].as_mv.row;
+      const int mv_y = (bsize < BLOCK_8X8) ?
+          m->bmi[0].as_mv[0].as_mv.col : m->mbmi.mv[0].as_mv.col;
+
+      printf("Before pack_inter_mode_mvs(): "
+             "Frame=%d, (mi_row,mi_col)=(%d,%d), "
+             "mode=%d, segment_id=%d, bsize=%d, b_mode=%d, "
+             "mv[0]=(%d, %d), ref[0]=%d, ref[1]=%d\n",
+             cm->current_video_frame, mi_row, mi_col,
+             mode, segment_id, bsize, b_mode, mv_x, mv_y,
+             m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
+    }
+#endif  // 0
+    pack_inter_mode_mvs(cpi, m,
+#if CONFIG_SUPERTX
+                        supertx_enabled,
+#endif
+                        w);
+  }
+
+  for (plane = 0; plane <= 1; ++plane) {
+    if (m->mbmi.palette_mode_info.palette_size[plane] > 0) {
+      const int rows = (4 * num_4x4_blocks_high_lookup[m->mbmi.sb_type]) >>
+                       (xd->plane[plane].subsampling_y);
+      const int cols = (4 * num_4x4_blocks_wide_lookup[m->mbmi.sb_type]) >>
+                       (xd->plane[plane].subsampling_x);
+      assert(*tok < tok_end);
+      pack_palette_tokens(w, tok, m->mbmi.palette_mode_info.palette_size[plane],
+                          rows * cols - 1);
+      assert(*tok < tok_end + m->mbmi.skip);
+    }
+  }
+
+#if CONFIG_SUPERTX
+  if (supertx_enabled) return;
+#endif  // CONFIG_SUPERTX
+
+  if (!m->mbmi.skip) {
+    assert(*tok < tok_end);
+    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+#if CONFIG_VAR_TX
+      const struct macroblockd_plane *const pd = &xd->plane[plane];
+      MB_MODE_INFO *mbmi = &m->mbmi;
+      BLOCK_SIZE bsize = mbmi->sb_type;
+      const BLOCK_SIZE plane_bsize =
+          get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+
+      const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+      const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+      int row, col;
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+      TX_SIZE tx_size =
+          plane ? get_uv_tx_size(mbmi, &xd->plane[plane]) : mbmi->tx_size;
+
+      if (is_inter_block(mbmi) && tx_size < TX_SIZES) {
+#else
+      if (is_inter_block(mbmi)) {
+#endif
+        const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+        const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+        int bw = num_4x4_blocks_wide_lookup[txb_size];
+        int block = 0;
+        const int step = num_4x4_blocks_txsize_lookup[max_tx_size];
+        for (row = 0; row < num_4x4_h; row += bw) {
+          for (col = 0; col < num_4x4_w; col += bw) {
+            pack_txb_tokens(w, tok, tok_end, xd, mbmi, plane, plane_bsize,
+                            cm->bit_depth, block, row, col, max_tx_size);
+            block += step;
+          }
+        }
+      } else {
+        TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
+                           : m->mbmi.tx_size;
+        BLOCK_SIZE txb_size = txsize_to_bsize[tx];
+        int bw = num_4x4_blocks_wide_lookup[txb_size];
+        int bh = num_4x4_blocks_high_lookup[txb_size];
+
+        for (row = 0; row < num_4x4_h; row += bh)
+          for (col = 0; col < num_4x4_w; col += bw)
+            pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
+      }
+#else
+      TX_SIZE tx =
+          plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane]) : m->mbmi.tx_size;
+      pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
+#endif  // CONFIG_VAR_TX
+      assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
+      (*tok)++;
+    }
+  }
+}
+
+static void write_partition(const VP10_COMMON *const cm,
+                            const MACROBLOCKD *const xd, int hbs, int mi_row,
+                            int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
+                            vp10_writer *w) {
+  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
+  const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+  const int has_rows = (mi_row + hbs) < cm->mi_rows;
+  const int has_cols = (mi_col + hbs) < cm->mi_cols;
+
+  if (has_rows && has_cols) {
+#if CONFIG_EXT_PARTITION_TYPES
+    if (bsize <= BLOCK_8X8)
+      vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+    else
+      vp10_write_token(w, vp10_ext_partition_tree, probs,
+                       &ext_partition_encodings[p]);
+#else
+    vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+  } else if (!has_rows && has_cols) {
+    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
+    vp10_write(w, p == PARTITION_SPLIT, probs[1]);
+  } else if (has_rows && !has_cols) {
+    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
+    vp10_write(w, p == PARTITION_SPLIT, probs[2]);
+  } else {
+    assert(p == PARTITION_SPLIT);
+  }
+}
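+
+/* At frame edges only a subset of partitions is legal, so a single binary
+ * decision suffices. Worked example (illustrative): for a block whose
+ * bottom half lies outside the frame (!has_rows), only PARTITION_HORZ and
+ * PARTITION_SPLIT are possible, and the choice is coded with the single
+ * node probability probs[1] above. */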
+
+#if CONFIG_SUPERTX
+#define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,   \
+                               mi_row, mi_col, bsize)                         \
+  write_modes_sb(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col, \
+                 bsize)
+#else
+#define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \
+                               mi_row, mi_col, bsize)                       \
+  write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
+#endif  // CONFIG_SUPERTX
+
+static void write_modes_sb(VP10_COMP *const cpi, const TileInfo *const tile,
+                           vp10_writer *const w, const TOKENEXTRA **tok,
+                           const TOKENEXTRA *const tok_end,
+#if CONFIG_SUPERTX
+                           int supertx_enabled,
+#endif
+                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
+  const VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
+  const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
+  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
+#if CONFIG_SUPERTX
+  const int mi_offset = mi_row * cm->mi_stride + mi_col;
+  MB_MODE_INFO *mbmi;
+  const int pack_token = !supertx_enabled;
+  TX_SIZE supertx_size;
+  int plane;
+#endif
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  write_partition(cm, xd, hbs, mi_row, mi_col, partition, bsize, w);
+#if CONFIG_SUPERTX
+  mbmi = &cm->mi_grid_visible[mi_offset]->mbmi;
+  xd->mi = cm->mi_grid_visible + mi_offset;
+  set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[bsize], mi_col,
+                 num_8x8_blocks_wide_lookup[bsize], cm->mi_rows, cm->mi_cols);
+  if (!supertx_enabled && !frame_is_intra_only(cm) &&
+      partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+      !xd->lossless[0]) {
+    vpx_prob prob;
+    supertx_size = max_txsize_lookup[bsize];
+    prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
+                               [supertx_size];
+    supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
+    vp10_write(w, supertx_enabled, prob);
+  }
+#endif  // CONFIG_SUPERTX
+  if (subsize < BLOCK_8X8) {
+    write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row,
+                          mi_col);
+  } else {
+    switch (partition) {
+      case PARTITION_NONE:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        break;
+      case PARTITION_HORZ:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        if (mi_row + hbs < cm->mi_rows)
+          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                                mi_row + hbs, mi_col);
+        break;
+      case PARTITION_VERT:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        if (mi_col + hbs < cm->mi_cols)
+          write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                                mi_row, mi_col + hbs);
+        break;
+      case PARTITION_SPLIT:
+        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                               mi_row, mi_col, subsize);
+        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                               mi_row, mi_col + hbs, subsize);
+        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                               mi_row + hbs, mi_col, subsize);
+        write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                               mi_row + hbs, mi_col + hbs, subsize);
+        break;
+#if CONFIG_EXT_PARTITION_TYPES
+      case PARTITION_HORZ_A:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col + hbs);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row + hbs, mi_col);
+        break;
+      case PARTITION_HORZ_B:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row + hbs, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row + hbs, mi_col + hbs);
+        break;
+      case PARTITION_VERT_A:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row + hbs, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col + hbs);
+        break;
+      case PARTITION_VERT_B:
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row, mi_col + hbs);
+        write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled,
+                              mi_row + hbs, mi_col + hbs);
+        break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+      default: assert(0);
+    }
+  }
+#if CONFIG_SUPERTX
+  if (partition != PARTITION_NONE && supertx_enabled && pack_token) {
+    int skip;
+    xd->mi = cm->mi_grid_visible + mi_offset;
+    supertx_size = mbmi->tx_size;
+    set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[bsize], mi_col,
+                   num_8x8_blocks_wide_lookup[bsize], cm->mi_rows, cm->mi_cols);
+
+    assert(IMPLIES(!cm->seg.enabled, mbmi->segment_id_supertx == 0));
+    assert(mbmi->segment_id_supertx < MAX_SEGMENTS);
+
+    skip = write_skip(cm, xd, mbmi->segment_id_supertx, xd->mi[0], w);
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(supertx_size, bsize, 1) > 1 && !skip) {
+      int eset = get_ext_tx_set(supertx_size, bsize, 1);
+      if (eset > 0) {
+        vp10_write_token(w, vp10_ext_tx_inter_tree[eset],
+                         cm->fc->inter_ext_tx_prob[eset][supertx_size],
+                         &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+      }
+    }
+#else
+    if (supertx_size < TX_32X32 && !skip) {
+      vp10_write_token(w, vp10_ext_tx_tree,
+                       cm->fc->inter_ext_tx_prob[supertx_size],
+                       &ext_tx_encodings[mbmi->tx_type]);
+    }
+#endif  // CONFIG_EXT_TX
+
+    if (!skip) {
+      assert(*tok < tok_end);
+      for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+        const int mbmi_txb_size = txsize_to_bsize[mbmi->tx_size];
+        const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi_txb_size];
+        const int num_4x4_h = num_4x4_blocks_high_lookup[mbmi_txb_size];
+        int row, col;
+        TX_SIZE tx =
+            plane ? get_uv_tx_size(mbmi, &xd->plane[plane]) : mbmi->tx_size;
+        BLOCK_SIZE txb_size = txsize_to_bsize[tx];
+        int bw = num_4x4_blocks_wide_lookup[txb_size];
+
+        for (row = 0; row < num_4x4_h; row += bw)
+          for (col = 0; col < num_4x4_w; col += bw)
+            pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
+        assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
+        (*tok)++;
+      }
+    }
+  }
+#endif  // CONFIG_SUPERTX
+
+// Update the partition context.
+#if CONFIG_EXT_PARTITION_TYPES
+  update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
+#else
+  if (bsize >= BLOCK_8X8 &&
+      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
+    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+
+#if DERING_REFINEMENT
+  if (bsize == BLOCK_64X64 && cm->dering_level != 0 &&
+      !sb_all_skip(cm, mi_row, mi_col)) {
+    vpx_write_literal(
+        w,
+        cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
+        DERING_REFINEMENT_BITS);
+  }
+#endif  // DERING_REFINEMENT
+}
+
+static void write_modes(VP10_COMP *const cpi, const TileInfo *const tile,
+                        vp10_writer *const w, const TOKENEXTRA **tok,
+                        const TOKENEXTRA *const tok_end) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+  const int mi_row_start = tile->mi_row_start;
+  const int mi_row_end = tile->mi_row_end;
+  const int mi_col_start = tile->mi_col_start;
+  const int mi_col_end = tile->mi_col_end;
+  int mi_row, mi_col;
+
+  vp10_zero_above_context(cm, mi_col_start, mi_col_end);
+
+  for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
+    vp10_zero_left_context(xd);
+
+    for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
+      write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
+                             cm->sb_size);
+    }
+  }
+}
+
+static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
+                                    vp10_coeff_stats *coef_branch_ct,
+                                    vp10_coeff_probs_model *coef_probs) {
+  vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+  unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
+      cpi->common.counts.eob_branch[tx_size];
+  int i, j, k, l, m;
+
+  for (i = 0; i < PLANE_TYPES; ++i) {
+    for (j = 0; j < REF_TYPES; ++j) {
+      for (k = 0; k < COEF_BANDS; ++k) {
+        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+          vp10_tree_probs_from_distribution(vp10_coef_tree,
+                                            coef_branch_ct[i][j][k][l],
+                                            coef_counts[i][j][k][l]);
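+          // Node 0 separates EOB from non-EOB tokens; re-derive its "one"
+          // count from the separately tracked eob_branch counters.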
+          coef_branch_ct[i][j][k][l][0][1] =
+              eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
+          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
+            coef_probs[i][j][k][l][m] =
+                get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
+                                coef_branch_ct[i][j][k][l][m][1]);
+        }
+      }
+    }
+  }
+}
+
+static void update_coef_probs_common(vp10_writer *const bc, VP10_COMP *cpi,
+                                     TX_SIZE tx_size,
+                                     vp10_coeff_stats *frame_branch_ct,
+                                     vp10_coeff_probs_model *new_coef_probs) {
+  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+  const vpx_prob upd = DIFF_UPDATE_PROB;
+  const int entropy_nodes_update = UNCONSTRAINED_NODES;
+  int i, j, k, l, t;
+  int stepsize = cpi->sf.coeff_prob_appx_step;
+
+  switch (cpi->sf.use_fast_coef_updates) {
+    case TWO_LOOP: {
+      /* Dry run to see whether any update is needed at all. */
+      int savings = 0;
+      int update[2] = { 0, 0 };
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+                int s;
+                int u = 0;
+                if (t == PIVOT_NODE)
+                  s = vp10_prob_diff_update_savings_search_model(
+                      frame_branch_ct[i][j][k][l][0],
+                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
+                else
+                  s = vp10_prob_diff_update_savings_search(
+                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
+                if (s > 0 && newp != oldp) u = 1;
+                if (u)
+                  savings += s - (int)(vp10_cost_zero(upd));
+                else
+                  savings -= (int)(vp10_cost_zero(upd));
+                update[u]++;
+              }
+            }
+          }
+        }
+      }
+
+      /* Check whether any coefficient probability was updated at all. */
+      if (update[1] == 0 || savings < 0) {
+        vp10_write_bit(bc, 0);
+        return;
+      }
+      vp10_write_bit(bc, 1);
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              // calc probs and branch cts for this frame only
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                const vpx_prob upd = DIFF_UPDATE_PROB;
+                int s;
+                int u = 0;
+                if (t == PIVOT_NODE)
+                  s = vp10_prob_diff_update_savings_search_model(
+                      frame_branch_ct[i][j][k][l][0],
+                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
+                else
+                  s = vp10_prob_diff_update_savings_search(
+                      frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
+                if (s > 0 && newp != *oldp) u = 1;
+                vp10_write(bc, u, upd);
+                if (u) {
+                  /* send/use new probability */
+                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  *oldp = newp;
+                }
+              }
+            }
+          }
+        }
+      }
+      return;
+    }
+
+    case ONE_LOOP_REDUCED: {
+      int updates = 0;
+      int noupdates_before_first = 0;
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              // calc probs and branch cts for this frame only
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                int s;
+                int u = 0;
+
+                if (t == PIVOT_NODE) {
+                  s = vp10_prob_diff_update_savings_search_model(
+                      frame_branch_ct[i][j][k][l][0],
+                      old_coef_probs[i][j][k][l], &newp, upd, stepsize);
+                } else {
+                  s = vp10_prob_diff_update_savings_search(
+                      frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
+                }
+
+                if (s > 0 && newp != *oldp) u = 1;
+                updates += u;
+                if (u == 0 && updates == 0) {
+                  noupdates_before_first++;
+                  continue;
+                }
+                if (u == 1 && updates == 1) {
+                  int v;
+                  // first update
+                  vp10_write_bit(bc, 1);
+                  for (v = 0; v < noupdates_before_first; ++v)
+                    vp10_write(bc, 0, upd);
+                }
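+                // The zero bits for skipped nodes are only emitted once the
+                // first real update arrives; if no node is ever updated, the
+                // single "no updates" bit below replaces them all.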
+                vp10_write(bc, u, upd);
+                if (u) {
+                  /* send/use new probability */
+                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  *oldp = newp;
+                }
+              }
+            }
+          }
+        }
+      }
+      if (updates == 0) {
+        vp10_write_bit(bc, 0);  // no updates
+      }
+      return;
+    }
+    default: assert(0);
+  }
+}
+
+#if CONFIG_ENTROPY
+// Calculate the token counts between subsequent subframe updates.
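+// For update interval index, the diff is buf[index + 1] - buf[index]; for
+// the final interval (index == coef_probs_update_idx) it is the running
+// frame-level counts minus buf[max_idx].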
+static void get_coef_counts_diff(
+    VP10_COMP *cpi, int index,
+    vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
+    unsigned int eob_counts[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
+                           [COEFF_CONTEXTS]) {
+  int i, j, k, l, m, tx_size, val;
+  const int max_idx = cpi->common.coef_probs_update_idx;
+  const TX_MODE tx_mode = cpi->common.tx_mode;
+  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+  const SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
+
+  assert(max_idx < COEF_PROBS_BUFS);
+
+  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+    for (i = 0; i < PLANE_TYPES; ++i)
+      for (j = 0; j < REF_TYPES; ++j)
+        for (k = 0; k < COEF_BANDS; ++k)
+          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+            if (index == max_idx) {
+              val =
+                  cpi->common.counts.eob_branch[tx_size][i][j][k][l] -
+                  subframe_stats->eob_counts_buf[max_idx][tx_size][i][j][k][l];
+            } else {
+              val = subframe_stats
+                        ->eob_counts_buf[index + 1][tx_size][i][j][k][l] -
+                    subframe_stats->eob_counts_buf[index][tx_size][i][j][k][l];
+            }
+            assert(val >= 0);
+            eob_counts[tx_size][i][j][k][l] = val;
+
+            for (m = 0; m < ENTROPY_TOKENS; ++m) {
+              if (index == max_idx) {
+                val = cpi->td.rd_counts.coef_counts[tx_size][i][j][k][l][m] -
+                      subframe_stats
+                          ->coef_counts_buf[max_idx][tx_size][i][j][k][l][m];
+              } else {
+                val = subframe_stats
+                          ->coef_counts_buf[index + 1][tx_size][i][j][k][l][m] -
+                      subframe_stats
+                          ->coef_counts_buf[index][tx_size][i][j][k][l][m];
+              }
+              assert(val >= 0);
+              coef_counts[tx_size][i][j][k][l][m] = val;
+            }
+          }
+}
+
+static void update_coef_probs_subframe(
+    vp10_writer *const bc, VP10_COMP *cpi, TX_SIZE tx_size,
+    vp10_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
+    vp10_coeff_probs_model *new_coef_probs) {
+  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+  const vpx_prob upd = DIFF_UPDATE_PROB;
+  const int entropy_nodes_update = UNCONSTRAINED_NODES;
+  int i, j, k, l, t;
+  int stepsize = cpi->sf.coeff_prob_appx_step;
+  const int max_idx = cpi->common.coef_probs_update_idx;
+  int idx;
+  unsigned int this_branch_ct[ENTROPY_NODES][COEF_PROBS_BUFS][2];
+
+  switch (cpi->sf.use_fast_coef_updates) {
+    case TWO_LOOP: {
+      /* Dry run to see whether any update is needed at all. */
+      int savings = 0;
+      int update[2] = { 0, 0 };
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              for (t = 0; t < ENTROPY_NODES; ++t) {
+                for (idx = 0; idx <= max_idx; ++idx) {
+                  memcpy(this_branch_ct[t][idx],
+                         branch_ct[idx][tx_size][i][j][k][l][t],
+                         2 * sizeof(this_branch_ct[t][idx][0]));
+                }
+              }
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+                int s, u = 0;
+
+                if (t == PIVOT_NODE)
+                  s = vp10_prob_update_search_model_subframe(
+                      this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
+                      stepsize, max_idx);
+                else
+                  s = vp10_prob_update_search_subframe(this_branch_ct[t], oldp,
+                                                       &newp, upd, max_idx);
+                if (s > 0 && newp != oldp) u = 1;
+                if (u)
+                  savings += s - (int)(vp10_cost_zero(upd));
+                else
+                  savings -= (int)(vp10_cost_zero(upd));
+                update[u]++;
+              }
+            }
+          }
+        }
+      }
+
+      /* Check whether any coefficient probability was updated at all. */
+      if (update[1] == 0 || savings < 0) {
+        vp10_write_bit(bc, 0);
+        return;
+      }
+      vp10_write_bit(bc, 1);
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              for (t = 0; t < ENTROPY_NODES; ++t) {
+                for (idx = 0; idx <= max_idx; ++idx) {
+                  memcpy(this_branch_ct[t][idx],
+                         branch_ct[idx][tx_size][i][j][k][l][t],
+                         2 * sizeof(this_branch_ct[t][idx][0]));
+                }
+              }
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                const vpx_prob upd = DIFF_UPDATE_PROB;
+                int s;
+                int u = 0;
+
+                if (t == PIVOT_NODE)
+                  s = vp10_prob_update_search_model_subframe(
+                      this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
+                      stepsize, max_idx);
+                else
+                  s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
+                                                       &newp, upd, max_idx);
+                if (s > 0 && newp != *oldp) u = 1;
+                vp10_write(bc, u, upd);
+                if (u) {
+                  /* send/use new probability */
+                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  *oldp = newp;
+                }
+              }
+            }
+          }
+        }
+      }
+      return;
+    }
+
+    case ONE_LOOP_REDUCED: {
+      int updates = 0;
+      int noupdates_before_first = 0;
+      for (i = 0; i < PLANE_TYPES; ++i) {
+        for (j = 0; j < REF_TYPES; ++j) {
+          for (k = 0; k < COEF_BANDS; ++k) {
+            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+              for (t = 0; t < ENTROPY_NODES; ++t) {
+                for (idx = 0; idx <= max_idx; ++idx) {
+                  memcpy(this_branch_ct[t][idx],
+                         branch_ct[idx][tx_size][i][j][k][l][t],
+                         2 * sizeof(this_branch_ct[t][idx][0]));
+                }
+              }
+              for (t = 0; t < entropy_nodes_update; ++t) {
+                vpx_prob newp = new_coef_probs[i][j][k][l][t];
+                vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+                int s;
+                int u = 0;
+
+                if (t == PIVOT_NODE)
+                  s = vp10_prob_update_search_model_subframe(
+                      this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
+                      stepsize, max_idx);
+                else
+                  s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
+                                                       &newp, upd, max_idx);
+                if (s > 0 && newp != *oldp) u = 1;
+                updates += u;
+                if (u == 0 && updates == 0) {
+                  noupdates_before_first++;
+                  continue;
+                }
+                if (u == 1 && updates == 1) {
+                  int v;
+                  // first update
+                  vp10_write_bit(bc, 1);
+                  for (v = 0; v < noupdates_before_first; ++v)
+                    vp10_write(bc, 0, upd);
+                }
+                vp10_write(bc, u, upd);
+                if (u) {
+                  /* send/use new probability */
+                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  *oldp = newp;
+                }
+              }
+            }
+          }
+        }
+      }
+      if (updates == 0) {
+        vp10_write_bit(bc, 0);  // no updates
+      }
+      return;
+    }
+    default: assert(0);
+  }
+}
+#endif  // CONFIG_ENTROPY
+
+static void update_coef_probs(VP10_COMP *cpi, vp10_writer *w) {
+  const TX_MODE tx_mode = cpi->common.tx_mode;
+  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
+  TX_SIZE tx_size;
+#if CONFIG_ANS
+  int update = 0;
+#endif  // CONFIG_ANS
+#if CONFIG_ENTROPY
+  VP10_COMMON *cm = &cpi->common;
+  SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
+  unsigned int eob_counts_copy[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
+                              [COEFF_CONTEXTS];
+  int i;
+  vp10_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
+
+  if (cm->do_subframe_update &&
+      cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+    vp10_copy(cpi->common.fc->coef_probs,
+              subframe_stats->enc_starting_coef_probs);
+    for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
+      get_coef_counts_diff(cpi, i, cpi->wholeframe_stats.coef_counts_buf[i],
+                           cpi->wholeframe_stats.eob_counts_buf[i]);
+    }
+  }
+#endif  // CONFIG_ENTROPY
+
+  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
+    vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
+    vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+    if (cpi->td.counts->tx_size_totals[tx_size] <= 20 ||
+        (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
+      vp10_write_bit(w, 0);
+    } else {
+#if CONFIG_ENTROPY
+      if (cm->do_subframe_update &&
+          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+        unsigned int
+            eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
+        vp10_coeff_count coef_counts_copy[PLANE_TYPES];
+        vp10_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
+        vp10_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
+        build_tree_distribution(cpi, tx_size, frame_branch_ct,
+                                frame_coef_probs);
+        for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
+          vp10_copy(cpi->common.counts.eob_branch[tx_size],
+                    cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
+          vp10_copy(cpi->td.rd_counts.coef_counts[tx_size],
+                    cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
+          build_tree_distribution(cpi, tx_size, cpi->branch_ct_buf[i][tx_size],
+                                  dummy_frame_coef_probs);
+        }
+        vp10_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
+        vp10_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
+
+        update_coef_probs_subframe(w, cpi, tx_size, cpi->branch_ct_buf,
+                                   frame_coef_probs);
+#if CONFIG_ANS
+        update = 1;
+#endif  // CONFIG_ANS
+      } else {
+#endif  // CONFIG_ENTROPY
+        build_tree_distribution(cpi, tx_size, frame_branch_ct,
+                                frame_coef_probs);
+        update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
+                                 frame_coef_probs);
+#if CONFIG_ANS
+        update = 1;
+#endif  // CONFIG_ANS
+#if CONFIG_ENTROPY
+      }
+#endif  // CONFIG_ENTROPY
+    }
+  }
+
+#if CONFIG_ENTROPY
+  vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+  vp10_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
+  if (cm->do_subframe_update &&
+      cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+    vp10_copy(eob_counts_copy, cm->counts.eob_branch);
+    for (i = 1; i <= cpi->common.coef_probs_update_idx; ++i) {
+      for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
+        vp10_full_to_model_counts(cm->counts.coef[tx_size],
+                                  subframe_stats->coef_counts_buf[i][tx_size]);
+      vp10_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
+      vp10_partial_adapt_probs(cm, 0, 0);
+      vp10_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
+    }
+    vp10_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
+    vp10_copy(cm->counts.eob_branch, eob_counts_copy);
+  }
+#endif  // CONFIG_ENTROPY
+#if CONFIG_ANS
+  if (update) vp10_coef_pareto_cdfs(cpi->common.fc);
+#endif  // CONFIG_ANS
+}
+
+#if CONFIG_LOOP_RESTORATION
+static void encode_restoration(VP10_COMMON *cm,
+                               struct vpx_write_bit_buffer *wb) {
+  int i;
+  RestorationInfo *rst = &cm->rst_info;
+  vpx_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
+  if (rst->restoration_type != RESTORE_NONE) {
+    if (rst->restoration_type == RESTORE_BILATERAL) {
+      vpx_wb_write_bit(wb, 1);
+      for (i = 0; i < cm->rst_internal.ntiles; ++i) {
+        if (rst->bilateral_level[i] >= 0) {
+          vpx_wb_write_bit(wb, 1);
+          vpx_wb_write_literal(wb, rst->bilateral_level[i],
+                               vp10_bilateral_level_bits(cm));
+        } else {
+          vpx_wb_write_bit(wb, 0);
+        }
+      }
+    } else {
+      vpx_wb_write_bit(wb, 0);
+      for (i = 0; i < cm->rst_internal.ntiles; ++i) {
+        if (rst->wiener_level[i]) {
+          vpx_wb_write_bit(wb, 1);
+          vpx_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
+                               WIENER_FILT_TAP0_BITS);
+          vpx_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
+                               WIENER_FILT_TAP1_BITS);
+          vpx_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
+                               WIENER_FILT_TAP2_BITS);
+          vpx_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
+                               WIENER_FILT_TAP0_BITS);
+          vpx_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
+                               WIENER_FILT_TAP1_BITS);
+          vpx_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
+                               WIENER_FILT_TAP2_BITS);
+        } else {
+          vpx_wb_write_bit(wb, 0);
+        }
+      }
+    }
+  }
+}
+#endif  // CONFIG_LOOP_RESTORATION
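+
+/* Sketch of the restoration syntax written above (for orientation only):
+ * one bit selects RESTORE_NONE vs. active; if active, one bit selects
+ * bilateral vs. Wiener, then each restoration tile gets an enable bit
+ * followed by its parameters -- a bilateral level, or six Wiener taps
+ * coded as offsets from their per-tap minima. */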
+
+static void encode_loopfilter(VP10_COMMON *cm,
+                              struct vpx_write_bit_buffer *wb) {
+  int i;
+  struct loopfilter *lf = &cm->lf;
+
+  // Encode the loop filter level and type
+  vpx_wb_write_literal(wb, lf->filter_level, 6);
+  vpx_wb_write_literal(wb, lf->sharpness_level, 3);
+
+  // Write out loop filter deltas applied at the MB level based on mode or
+  // ref frame (if they are enabled).
+  vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
+
+  if (lf->mode_ref_delta_enabled) {
+    vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
+    if (lf->mode_ref_delta_update) {
+      for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
+        const int delta = lf->ref_deltas[i];
+        const int changed = delta != lf->last_ref_deltas[i];
+        vpx_wb_write_bit(wb, changed);
+        if (changed) {
+          lf->last_ref_deltas[i] = delta;
+          vpx_wb_write_inv_signed_literal(wb, delta, 6);
+        }
+      }
+
+      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
+        const int delta = lf->mode_deltas[i];
+        const int changed = delta != lf->last_mode_deltas[i];
+        vpx_wb_write_bit(wb, changed);
+        if (changed) {
+          lf->last_mode_deltas[i] = delta;
+          vpx_wb_write_inv_signed_literal(wb, delta, 6);
+        }
+      }
+    }
+  }
+}
+
+#if CONFIG_CLPF
+static void encode_clpf(const VP10_COMMON *cm,
+                        struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_literal(wb, cm->clpf, 1);
+}
+#endif
+
+#if CONFIG_DERING
+static void encode_dering(int level, struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_literal(wb, level, DERING_LEVEL_BITS);
+}
+#endif  // CONFIG_DERING
+
+static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
+  if (delta_q != 0) {
+    vpx_wb_write_bit(wb, 1);
+    vpx_wb_write_inv_signed_literal(wb, delta_q, 6);
+  } else {
+    vpx_wb_write_bit(wb, 0);
+  }
+}
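+
+/* Example (illustrative): delta_q == 0 costs a single zero bit, while a
+ * non-zero delta such as -3 is coded as a one bit followed by the 6-bit
+ * value written by vpx_wb_write_inv_signed_literal. */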
+
+static void encode_quantization(const VP10_COMMON *const cm,
+                                struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+  write_delta_q(wb, cm->y_dc_delta_q);
+  write_delta_q(wb, cm->uv_dc_delta_q);
+  write_delta_q(wb, cm->uv_ac_delta_q);
+#if CONFIG_AOM_QM
+  vpx_wb_write_bit(wb, cm->using_qmatrix);
+  if (cm->using_qmatrix) {
+    vpx_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
+    vpx_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
+  }
+#endif
+}
+
+static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
+                                struct vpx_write_bit_buffer *wb) {
+  int i, j;
+  const struct segmentation *seg = &cm->seg;
+
+  vpx_wb_write_bit(wb, seg->enabled);
+  if (!seg->enabled) return;
+
+  // Segmentation map
+  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
+    vpx_wb_write_bit(wb, seg->update_map);
+  } else {
+    assert(seg->update_map == 1);
+  }
+  if (seg->update_map) {
+    // Select the coding strategy (temporal or spatial)
+    vp10_choose_segmap_coding_method(cm, xd);
+
+    // Write out the chosen coding method.
+    if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
+      vpx_wb_write_bit(wb, seg->temporal_update);
+    } else {
+      assert(seg->temporal_update == 0);
+    }
+  }
+
+  // Segmentation data
+  vpx_wb_write_bit(wb, seg->update_data);
+  if (seg->update_data) {
+    vpx_wb_write_bit(wb, seg->abs_delta);
+
+    for (i = 0; i < MAX_SEGMENTS; i++) {
+      for (j = 0; j < SEG_LVL_MAX; j++) {
+        const int active = segfeature_active(seg, i, j);
+        vpx_wb_write_bit(wb, active);
+        if (active) {
+          const int data = get_segdata(seg, i, j);
+          const int data_max = vp10_seg_feature_data_max(j);
+
+          if (vp10_is_segfeature_signed(j)) {
+            encode_unsigned_max(wb, abs(data), data_max);
+            vpx_wb_write_bit(wb, data < 0);
+          } else {
+            encode_unsigned_max(wb, data, data_max);
+          }
+        }
+      }
+    }
+  }
+}
+
+static void update_seg_probs(VP10_COMP *cpi, vp10_writer *w) {
+  VP10_COMMON *cm = &cpi->common;
+
+  if (!cm->seg.enabled || !cm->seg.update_map) return;
+
+  if (cm->seg.temporal_update) {
+    int i;
+
+    for (i = 0; i < PREDICTION_PROBS; i++)
+      vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+                                 cm->counts.seg.pred[i]);
+
+    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+                     cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
+  } else {
+    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+                     cm->counts.seg.tree_total, MAX_SEGMENTS, w);
+  }
+}
+
+static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
+  if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
+}
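+
+/* Example (illustrative): TX_MODE_SELECT is signalled with a single one
+ * bit; any fixed transform mode is a zero bit followed by its 2-bit mode
+ * value. */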
+
+static void update_txfm_probs(VP10_COMMON *cm, vp10_writer *w,
+                              FRAME_COUNTS *counts) {
+  if (cm->tx_mode == TX_MODE_SELECT) {
+    int i, j;
+    for (i = 0; i < TX_SIZES - 1; ++i)
+      for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
+        prob_diff_update(vp10_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
+                         counts->tx_size[i][j], i + 2, w);
+  }
+}
+
+static void write_interp_filter(INTERP_FILTER filter,
+                                struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_bit(wb, filter == SWITCHABLE);
+  if (filter != SWITCHABLE)
+    vpx_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
+}
+
+static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+  if (cm->interp_filter == SWITCHABLE) {
+    // Check to see if only one of the filters is actually used
+    int count[SWITCHABLE_FILTERS];
+    int i, j, c = 0;
+    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+      count[i] = 0;
+      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
+        count[i] += counts->switchable_interp[j][i];
+      c += (count[i] > 0);
+    }
+    if (c == 1) {
+      // Only one filter is used. So set the filter at frame level
+      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+        if (count[i]) {
+          cm->interp_filter = i;
+          break;
+        }
+      }
+    }
+  }
+}
+
+static void write_tile_info(const VP10_COMMON *const cm,
+                            struct vpx_write_bit_buffer *wb) {
+#if CONFIG_EXT_TILE
+  const int tile_width =
+      ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
+      cm->mib_size_log2;
+  const int tile_height =
+      ALIGN_POWER_OF_TWO(cm->tile_height, cm->mib_size_log2) >>
+      cm->mib_size_log2;
+
+  assert(tile_width > 0);
+  assert(tile_height > 0);
+
+// Write the tile sizes
+#if CONFIG_EXT_PARTITION
+  if (cm->sb_size == BLOCK_128X128) {
+    assert(tile_width <= 32);
+    assert(tile_height <= 32);
+    vpx_wb_write_literal(wb, tile_width - 1, 5);
+    vpx_wb_write_literal(wb, tile_height - 1, 5);
+  } else
+#endif  // CONFIG_EXT_PARTITION
+  {
+    assert(tile_width <= 64);
+    assert(tile_height <= 64);
+    vpx_wb_write_literal(wb, tile_width - 1, 6);
+    vpx_wb_write_literal(wb, tile_height - 1, 6);
+  }
+#else
+  int min_log2_tile_cols, max_log2_tile_cols, ones;
+  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+  // columns
+  ones = cm->log2_tile_cols - min_log2_tile_cols;
+  while (ones--) vpx_wb_write_bit(wb, 1);
+
+  if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
+
+  // rows
+  vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
+  if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+#endif  // CONFIG_EXT_TILE
+}
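+
+/* Worked example for the non-EXT_TILE path (illustrative): with
+ * min_log2_tile_cols == 0, max_log2_tile_cols == 2 and log2_tile_cols == 1,
+ * the encoder writes "1" (one unary increment past the minimum) and then
+ * "0" (a stop bit, since the maximum has not been reached). */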
+
+static int get_refresh_mask(VP10_COMP *cpi) {
+  int refresh_mask = 0;
+
+#if CONFIG_EXT_REFS
+  // NOTE(zoeliu): When LAST_FRAME is to be refreshed, the decoder is
+  // notified to refresh LAST3_FRAME instead, and the virtual indexes for
+  // all three LAST reference frames are then updated accordingly, i.e.:
+  // (1) The original virtual index for LAST3_FRAME becomes the new virtual
+  //     index for LAST_FRAME; and
+  // (2) The original virtual indexes for LAST_FRAME and LAST2_FRAME are
+  //     shifted and become the new virtual indexes for LAST2_FRAME and
+  //     LAST3_FRAME.
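+  // For example (illustrative only): with lst_fb_idxes = {0, 1, 2} and
+  // refresh_last_frame == 1, the refresh bit for the LAST3 slot
+  // (lst_fb_idxes[2]) is set below, and the decoder then performs the
+  // rotation described above.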
+  refresh_mask |=
+      (cpi->refresh_last_frame << cpi->lst_fb_idxes[LAST_REF_FRAMES - 1]);
+  if (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs) {
+    // We have swapped the virtual indices
+    refresh_mask |= (cpi->refresh_bwd_ref_frame << cpi->arf_map[0]);
+  } else {
+    refresh_mask |= (cpi->refresh_bwd_ref_frame << cpi->bwd_fb_idx);
+  }
+#else
+  refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
+#endif  // CONFIG_EXT_REFS
+
+  if (vp10_preserve_existing_gf(cpi)) {
+    // We have decided to preserve the previously existing golden frame as our
+    // new ARF frame. However, in the short term we leave it in the GF slot and,
+    // if we're updating the GF with the current decoded frame, we save it
+    // instead to the ARF slot.
+    // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+    // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
+    // there so that it can be done outside of the recode loop.
+    // Note: This is highly specific to the use of ARF as a forward reference,
+    // and this needs to be generalized as other uses are implemented
+    // (like RTC/temporal scalability).
+    return refresh_mask | (cpi->refresh_golden_frame << cpi->alt_fb_idx);
+  } else {
+    int arf_idx = cpi->alt_fb_idx;
+#if CONFIG_EXT_REFS
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    arf_idx = cpi->arf_map[gf_group->arf_update_idx[gf_group->index]];
+#else
+    if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
+      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+      arf_idx = gf_group->arf_update_idx[gf_group->index];
+    }
+#endif  // CONFIG_EXT_REFS
+    return refresh_mask | (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
+           (cpi->refresh_alt_ref_frame << arf_idx);
+  }
+}
+
+#if CONFIG_EXT_TILE
+static INLINE int find_identical_tile(
+    const int tile_row, const int tile_col,
+    TileBufferEnc (*const tile_buffers)[1024]) {
+  const MV32 candidate_offset[1] = { { 1, 0 } };
+  const uint8_t *const cur_tile_data =
+      tile_buffers[tile_row][tile_col].data + 4;
+  const unsigned int cur_tile_size = tile_buffers[tile_row][tile_col].size;
+
+  int i;
+
+  if (tile_row == 0) return 0;
+
+  // TODO(yunqingwang): For now, only the above tile is checked and used.
+  // More candidates, such as the left tile, can be added later.
+  for (i = 0; i < 1; i++) {
+    int row_offset = candidate_offset[0].row;
+    int col_offset = candidate_offset[0].col;
+    int row = tile_row - row_offset;
+    int col = tile_col - col_offset;
+    uint8_t tile_hdr;
+    const uint8_t *tile_data;
+    TileBufferEnc *candidate;
+
+    if (row < 0 || col < 0) continue;
+
+    tile_hdr = *(tile_buffers[row][col].data);
+
+    // Read out the tile-copy-mode (tcm) bit.
+    if ((tile_hdr >> 7) == 1) {
+      // The candidate is itself a copy tile; follow its offset back to the
+      // original tile.
+      row_offset += tile_hdr & 0x7f;
+      row = tile_row - row_offset;
+    }
+
+    candidate = &tile_buffers[row][col];
+
+    if (row_offset >= 128 || candidate->size != cur_tile_size) continue;
+
+    tile_data = candidate->data + 4;
+
+    if (memcmp(tile_data, cur_tile_data, cur_tile_size) != 0) continue;
+
+    // Identical tile found
+    assert(row_offset > 0);
+    return row_offset;
+  }
+
+  // No identical tile found
+  return 0;
+}
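+
+/* Tile-copy header sketch: the tcm flag lives in the top bit of the header
+ * byte inspected above, with the row offset to the copied tile in the low
+ * seven bits, which is why offsets must stay below 128. */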
+#endif  // CONFIG_EXT_TILE
+
+static uint32_t write_tiles(VP10_COMP *const cpi, uint8_t *const dst,
+                            unsigned int *max_tile_size,
+                            unsigned int *max_tile_col_size) {
+  const VP10_COMMON *const cm = &cpi->common;
+#if CONFIG_ANS
+  struct AnsCoder token_ans;
+#else
+  vp10_writer mode_bc;
+#endif  // CONFIG_ANS
+  int tile_row, tile_col;
+  TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
+  TileBufferEnc (*const tile_buffers)[MAX_TILE_COLS] = cpi->tile_buffers;
+  size_t total_size = 0;
+  const int tile_cols = cm->tile_cols;
+  const int tile_rows = cm->tile_rows;
+#if CONFIG_EXT_TILE
+  const int have_tiles = tile_cols * tile_rows > 1;
+#endif  // CONFIG_EXT_TILE
+#if CONFIG_ANS
+  BufAnsCoder *buf_ans = &cpi->buf_ans;
+#endif  // CONFIG_ANS
+
+  *max_tile_size = 0;
+  *max_tile_col_size = 0;
+
+// All tile size fields are written as 4 bytes. A call to remux_tiles will
+// later compact the data if smaller headers are adequate.
+
+#if CONFIG_EXT_TILE
+  for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+    TileInfo tile_info;
+    const int is_last_col = (tile_col == tile_cols - 1);
+    const size_t col_offset = total_size;
+
+    vp10_tile_set_col(&tile_info, cm, tile_col);
+
+    // The last column does not have a column header
+    if (!is_last_col) total_size += 4;
+
+    for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+      TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
+      unsigned int tile_size;
+      const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
+      const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
+      const int data_offset = have_tiles ? 4 : 0;
+
+      vp10_tile_set_row(&tile_info, cm, tile_row);
+
+      buf->data = dst + total_size;
+
+      // When CONFIG_EXT_TILE = 1, every tile in the row has a header,
+      // even the last one, unless no tiling is used at all.
+      total_size += data_offset;
+#if !CONFIG_ANS
+      vpx_start_encode(&mode_bc, buf->data + data_offset);
+      write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
+      assert(tok == tok_end);
+      vpx_stop_encode(&mode_bc);
+      tile_size = mode_bc.pos;
+#else
+      buf_ans_write_reset(buf_ans);
+      write_modes(cpi, &tile_info, buf_ans, &tok, tok_end);
+      assert(tok == tok_end);
+      ans_write_init(&token_ans, buf->data + data_offset);
+      buf_ans_flush(buf_ans, &token_ans);
+      tile_size = ans_write_end(&token_ans);
+#endif  // !CONFIG_ANS
+
+      buf->size = tile_size;
+
+      // Record the maximum tile size we see, so we can compact headers later.
+      *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+
+      if (have_tiles) {
+        // tile header: size of this tile, or copy offset
+        uint32_t tile_header = tile_size;
+
+        // Check if this tile is a copy tile.
+        // Copy tiles are very unlikely on key frames, so skip the search
+        // there to avoid unnecessary work.
+        if (cm->frame_type != KEY_FRAME) {
+          const int identical_tile_offset =
+              find_identical_tile(tile_row, tile_col, tile_buffers);
+
+          if (identical_tile_offset > 0) {
+            tile_size = 0;
+            tile_header = identical_tile_offset | 0x80;
+            tile_header <<= 24;
+          }
+        }
+        }
+
+        mem_put_le32(buf->data, tile_header);
+      }
+
+      total_size += tile_size;
+    }
+
+    if (!is_last_col) {
+      size_t col_size = total_size - col_offset - 4;
+      mem_put_le32(dst + col_offset, col_size);
+
+      // Record the maximum tile column size we see, so the column size
+      // fields can be compacted later if a smaller header suffices.
+      *max_tile_col_size = VPXMAX(*max_tile_col_size, col_size);
+    }
+  }
+#else
+  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
+    TileInfo tile_info;
+    const int is_last_row = (tile_row == tile_rows - 1);
+
+    vp10_tile_set_row(&tile_info, cm, tile_row);
+
+    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
+      TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
+      const int is_last_col = (tile_col == tile_cols - 1);
+      const int is_last_tile = is_last_col && is_last_row;
+      unsigned int tile_size;
+      const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
+      const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
+
+      vp10_tile_set_col(&tile_info, cm, tile_col);
+
+      buf->data = dst + total_size;
+
+      // The last tile does not have a header.
+      if (!is_last_tile) total_size += 4;
+
+#if !CONFIG_ANS
+      vpx_start_encode(&mode_bc, dst + total_size);
+      write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
+      assert(tok == tok_end);
+      vpx_stop_encode(&mode_bc);
+      tile_size = mode_bc.pos;
+#else
+      buf_ans_write_reset(buf_ans);
+      write_modes(cpi, &tile_info, buf_ans, &tok, tok_end);
+      assert(tok == tok_end);
+      ans_write_init(&token_ans, dst + total_size);
+      buf_ans_flush(buf_ans, &token_ans);
+      tile_size = ans_write_end(&token_ans);
+#endif  // !CONFIG_ANS
+
+      assert(tile_size > 0);
+
+      buf->size = tile_size;
+
+      if (!is_last_tile) {
+        *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+        // size of this tile
+        mem_put_le32(buf->data, tile_size);
+      }
+
+      total_size += tile_size;
+    }
+  }
+#endif  // CONFIG_EXT_TILE
+  return (uint32_t)total_size;
+}
+
+static void write_render_size(const VP10_COMMON *cm,
+                              struct vpx_write_bit_buffer *wb) {
+  const int scaling_active =
+      cm->width != cm->render_width || cm->height != cm->render_height;
+  vpx_wb_write_bit(wb, scaling_active);
+  if (scaling_active) {
+    vpx_wb_write_literal(wb, cm->render_width - 1, 16);
+    vpx_wb_write_literal(wb, cm->render_height - 1, 16);
+  }
+}
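+
+/* Worked example: a 1920x1080 render size that differs from the coded size
+ * is written as a set scaling bit followed by the 16-bit literals 1919 and
+ * 1079 (dimensions are coded minus one). */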
+
+static void write_frame_size(const VP10_COMMON *cm,
+                             struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_literal(wb, cm->width - 1, 16);
+  vpx_wb_write_literal(wb, cm->height - 1, 16);
+
+  write_render_size(cm, wb);
+}
+
+static void write_frame_size_with_refs(VP10_COMP *cpi,
+                                       struct vpx_write_bit_buffer *wb) {
+  VP10_COMMON *const cm = &cpi->common;
+  int found = 0;
+
+  MV_REFERENCE_FRAME ref_frame;
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
+
+    if (cfg != NULL) {
+      found =
+          cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
+      found &= cm->render_width == cfg->render_width &&
+               cm->render_height == cfg->render_height;
+    }
+    vpx_wb_write_bit(wb, found);
+    if (found) {
+      break;
+    }
+  }
+
+  if (!found) {
+    vpx_wb_write_literal(wb, cm->width - 1, 16);
+    vpx_wb_write_literal(wb, cm->height - 1, 16);
+    write_render_size(cm, wb);
+  }
+}
+
+static void write_sync_code(struct vpx_write_bit_buffer *wb) {
+  vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
+  vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
+  vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+}
+
+static void write_profile(BITSTREAM_PROFILE profile,
+                          struct vpx_write_bit_buffer *wb) {
+  switch (profile) {
+    case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
+    case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
+    case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
+    case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+    default: assert(0);
+  }
+}
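+
+/* Note on the literal values above (0, 2, 1, 6): they form a prefix code in
+ * which two bits alone distinguish profiles 0-2, while the pattern "11"
+ * tells the decoder to read one more bit to confirm profile 3. */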
+
+static void write_bitdepth_colorspace_sampling(
+    VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
+  if (cm->profile >= PROFILE_2) {
+    assert(cm->bit_depth > VPX_BITS_8);
+    vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
+  }
+  vpx_wb_write_literal(wb, cm->color_space, 3);
+  if (cm->color_space != VPX_CS_SRGB) {
+    // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
+    vpx_wb_write_bit(wb, cm->color_range);
+    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
+      assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
+      vpx_wb_write_bit(wb, cm->subsampling_x);
+      vpx_wb_write_bit(wb, cm->subsampling_y);
+      vpx_wb_write_bit(wb, 0);  // unused
+    } else {
+      assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
+    }
+  } else {
+    assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
+    vpx_wb_write_bit(wb, 0);  // unused
+  }
+}
+
+static void write_uncompressed_header(VP10_COMP *cpi,
+                                      struct vpx_write_bit_buffer *wb) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+
+  vpx_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
+
+  write_profile(cm->profile, wb);
+
+#if CONFIG_EXT_REFS
+  // NOTE: By default, all coded frames are to be used as reference frames.
+  cm->is_reference_frame = 1;
+
+  if (cm->show_existing_frame) {
+    RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+    const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
+
+    if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
+      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+                         "Buffer %d does not contain a reconstructed frame",
+                         frame_to_show);
+    }
+    ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
+
+    vpx_wb_write_bit(wb, 1);  // show_existing_frame
+    vpx_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
+
+    return;
+  } else {
+#endif                        // CONFIG_EXT_REFS
+    vpx_wb_write_bit(wb, 0);  // show_existing_frame
+#if CONFIG_EXT_REFS
+  }
+#endif  // CONFIG_EXT_REFS
+
+  vpx_wb_write_bit(wb, cm->frame_type);
+  vpx_wb_write_bit(wb, cm->show_frame);
+  vpx_wb_write_bit(wb, cm->error_resilient_mode);
+
+  if (cm->frame_type == KEY_FRAME) {
+    write_sync_code(wb);
+    write_bitdepth_colorspace_sampling(cm, wb);
+    write_frame_size(cm, wb);
+    if (frame_is_intra_only(cm))
+      vpx_wb_write_bit(wb, cm->allow_screen_content_tools);
+  } else {
+    if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
+
+    if (!cm->error_resilient_mode) {
+      if (cm->intra_only) {
+        vpx_wb_write_bit(wb,
+                         cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
+      } else {
+        vpx_wb_write_bit(wb,
+                         cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
+        if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
+          vpx_wb_write_bit(wb,
+                           cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
+      }
+    }
+
+#if CONFIG_EXT_REFS
+    cpi->refresh_frame_mask = get_refresh_mask(cpi);
+#endif  // CONFIG_EXT_REFS
+
+    if (cm->intra_only) {
+      write_sync_code(wb);
+      write_bitdepth_colorspace_sampling(cm, wb);
+
+#if CONFIG_EXT_REFS
+      vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+#else
+      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+#endif  // CONFIG_EXT_REFS
+      write_frame_size(cm, wb);
+    } else {
+      MV_REFERENCE_FRAME ref_frame;
+
+#if CONFIG_EXT_REFS
+      vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+#else
+      vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+      if (!cpi->refresh_frame_mask) {
+        // NOTE: "cpi->refresh_frame_mask == 0" indicates that the coded frame
+        //       will not be used as a reference
+        cm->is_reference_frame = 0;
+      }
+#endif  // CONFIG_EXT_REFS
+
+      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+        assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
+        vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+                             REF_FRAMES_LOG2);
+        vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
+      }
+
+      write_frame_size_with_refs(cpi, wb);
+
+      vpx_wb_write_bit(wb, cm->allow_high_precision_mv);
+
+      fix_interp_filter(cm, cpi->td.counts);
+      write_interp_filter(cm->interp_filter, wb);
+    }
+  }
+
+  if (!cm->error_resilient_mode) {
+    vpx_wb_write_bit(
+        wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
+  }
+
+  vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
+
+  assert(cm->mib_size == num_8x8_blocks_wide_lookup[cm->sb_size]);
+  assert(cm->mib_size == 1 << cm->mib_size_log2);
+#if CONFIG_EXT_PARTITION
+  assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
+  vpx_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
+#else
+  assert(cm->sb_size == BLOCK_64X64);
+#endif  // CONFIG_EXT_PARTITION
+
+  encode_loopfilter(cm, wb);
+#if CONFIG_CLPF
+  encode_clpf(cm, wb);
+#endif
+#if CONFIG_DERING
+  encode_dering(cm->dering_level, wb);
+#endif  // CONFIG_DERING
+#if CONFIG_LOOP_RESTORATION
+  encode_restoration(cm, wb);
+#endif  // CONFIG_LOOP_RESTORATION
+  encode_quantization(cm, wb);
+  encode_segmentation(cm, xd, wb);
+  if (!cm->seg.enabled && xd->lossless[0])
+    cm->tx_mode = TX_4X4;
+  else
+    write_txfm_mode(cm->tx_mode, wb);
+
+  if (cpi->allow_comp_inter_inter) {
+    const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
+    const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
+
+    vpx_wb_write_bit(wb, use_hybrid_pred);
+    if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
+  }
+
+  write_tile_info(cm, wb);
+}
+
+#if CONFIG_GLOBAL_MOTION
+static void write_global_motion_params(Global_Motion_Params *params,
+                                       vpx_prob *probs, vp10_writer *w) {
+  GLOBAL_MOTION_TYPE gmtype = get_gmtype(params);
+  vp10_write_token(w, vp10_global_motion_types_tree, probs,
+                   &global_motion_types_encodings[gmtype]);
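+  // Each motion model writes its extra parameters and then falls through to
+  // code the parameters of the simpler models it subsumes, so an affine
+  // model also codes the rotzoom and translation parameters.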
+  switch (gmtype) {
+    case GLOBAL_ZERO: break;
+    case GLOBAL_AFFINE:
+      vp10_write_primitive_symmetric(
+          w, params->motion_params.wmmat[4] >> GM_ALPHA_PREC_DIFF,
+          GM_ABS_ALPHA_BITS);
+      vp10_write_primitive_symmetric(
+          w, (params->motion_params.wmmat[5] >> GM_ALPHA_PREC_DIFF) -
+                 (1 << GM_ALPHA_PREC_BITS),
+          GM_ABS_ALPHA_BITS);
+    // fallthrough intended
+    case GLOBAL_ROTZOOM:
+      vp10_write_primitive_symmetric(
+          w, (params->motion_params.wmmat[2] >> GM_ALPHA_PREC_DIFF) -
+                 (1 << GM_ALPHA_PREC_BITS),
+          GM_ABS_ALPHA_BITS);
+      vp10_write_primitive_symmetric(
+          w, params->motion_params.wmmat[3] >> GM_ALPHA_PREC_DIFF,
+          GM_ABS_ALPHA_BITS);
+    // fallthrough intended
+    case GLOBAL_TRANSLATION:
+      vp10_write_primitive_symmetric(
+          w, params->motion_params.wmmat[0] >> GM_TRANS_PREC_DIFF,
+          GM_ABS_TRANS_BITS);
+      vp10_write_primitive_symmetric(
+          w, params->motion_params.wmmat[1] >> GM_TRANS_PREC_DIFF,
+          GM_ABS_TRANS_BITS);
+      break;
+    default: assert(0);
+  }
+}
+
+static void write_global_motion(VP10_COMP *cpi, vp10_writer *w) {
+  VP10_COMMON *const cm = &cpi->common;
+  int frame;
+  for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
+    if (!cpi->global_motion_used[frame]) {
+      memset(&cm->global_motion[frame], 0, sizeof(*cm->global_motion));
+    }
+    write_global_motion_params(&cm->global_motion[frame],
+                               cm->fc->global_motion_types_prob, w);
+  }
+}
+#endif
+
+static uint32_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
+  VP10_COMMON *const cm = &cpi->common;
+#if CONFIG_SUPERTX
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+#endif  // CONFIG_SUPERTX
+  FRAME_CONTEXT *const fc = cm->fc;
+  FRAME_COUNTS *counts = cpi->td.counts;
+  vp10_writer *header_bc;
+  int i, j;
+
+#if CONFIG_ANS
+  struct AnsCoder header_ans;
+  int header_size;
+  header_bc = &cpi->buf_ans;
+  buf_ans_write_reset(header_bc);
+#else
+  vp10_writer real_header_bc;
+  header_bc = &real_header_bc;
+  vpx_start_encode(header_bc, data);
+#endif
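+  // With CONFIG_ANS, the header symbols are buffered here and serialized in
+  // reverse order when flushed at the end of this function; otherwise they
+  // are written directly with the boolean coder.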
+  update_txfm_probs(cm, header_bc, counts);
+  update_coef_probs(cpi, header_bc);
+
+#if CONFIG_VAR_TX
+  update_txfm_partition_probs(cm, header_bc, counts);
+#endif
+
+  update_skip_probs(cm, header_bc, counts);
+  update_seg_probs(cpi, header_bc);
+
+  for (i = 0; i < INTRA_MODES; ++i)
+    prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+                     counts->uv_mode[i], INTRA_MODES, header_bc);
+
+#if CONFIG_EXT_PARTITION_TYPES
+  prob_diff_update(vp10_partition_tree, fc->partition_prob[0],
+                   counts->partition[0], PARTITION_TYPES, header_bc);
+  for (i = 1; i < PARTITION_CONTEXTS; ++i)
+    prob_diff_update(vp10_ext_partition_tree, fc->partition_prob[i],
+                     counts->partition[i], EXT_PARTITION_TYPES, header_bc);
+#else
+  for (i = 0; i < PARTITION_CONTEXTS; ++i)
+    prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+                     counts->partition[i], PARTITION_TYPES, header_bc);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+
+#if CONFIG_EXT_INTRA
+  for (i = 0; i < INTRA_FILTERS + 1; ++i)
+    prob_diff_update(vp10_intra_filter_tree, fc->intra_filter_probs[i],
+                     counts->intra_filter[i], INTRA_FILTERS, header_bc);
+#endif  // CONFIG_EXT_INTRA
+
+  if (frame_is_intra_only(cm)) {
+    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+    for (i = 0; i < INTRA_MODES; ++i)
+      for (j = 0; j < INTRA_MODES; ++j)
+        prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+                         counts->kf_y_mode[i][j], INTRA_MODES, header_bc);
+  } else {
+#if CONFIG_REF_MV
+    update_inter_mode_probs(cm, header_bc, counts);
+#else
+    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+      prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+                       counts->inter_mode[i], INTER_MODES, header_bc);
+#endif
+
+#if CONFIG_EXT_INTER
+    update_inter_compound_mode_probs(cm, header_bc);
+
+    if (cm->reference_mode != COMPOUND_REFERENCE) {
+      for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
+        if (is_interintra_allowed_bsize_group(i)) {
+          vp10_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
+                                     cm->counts.interintra[i]);
+        }
+      }
+      for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
+        prob_diff_update(
+            vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
+            counts->interintra_mode[i], INTERINTRA_MODES, header_bc);
+      }
+      for (i = 0; i < BLOCK_SIZES; i++) {
+        if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
+          vp10_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
+                                     cm->counts.wedge_interintra[i]);
+      }
+    }
+    if (cm->reference_mode != SINGLE_REFERENCE) {
+      for (i = 0; i < BLOCK_SIZES; i++)
+        if (is_interinter_wedge_used(i))
+          vp10_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
+                                     cm->counts.wedge_interinter[i]);
+    }
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+    for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
+      prob_diff_update(vp10_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
+                       MOTION_VARIATIONS, header_bc);
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
+    if (cm->interp_filter == SWITCHABLE)
+      update_switchable_interp_probs(cm, header_bc, counts);
+
+    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
+      vp10_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
+                                 counts->intra_inter[i]);
+
+    if (cpi->allow_comp_inter_inter) {
+      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
+      if (use_hybrid_pred)
+        for (i = 0; i < COMP_INTER_CONTEXTS; i++)
+          vp10_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
+                                     counts->comp_inter[i]);
+    }
+
+    if (cm->reference_mode != COMPOUND_REFERENCE) {
+      for (i = 0; i < REF_CONTEXTS; i++) {
+        for (j = 0; j < (SINGLE_REFS - 1); j++) {
+          vp10_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
+                                     counts->single_ref[i][j]);
+        }
+      }
+    }
+
+    if (cm->reference_mode != SINGLE_REFERENCE) {
+      for (i = 0; i < REF_CONTEXTS; i++) {
+#if CONFIG_EXT_REFS
+        for (j = 0; j < (FWD_REFS - 1); j++) {
+          vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+                                     counts->comp_ref[i][j]);
+        }
+        for (j = 0; j < (BWD_REFS - 1); j++) {
+          vp10_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
+                                     counts->comp_bwdref[i][j]);
+        }
+#else
+        for (j = 0; j < (COMP_REFS - 1); j++) {
+          vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+                                     counts->comp_ref[i][j]);
+        }
+#endif  // CONFIG_EXT_REFS
+      }
+    }
+
+    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
+      prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+                       counts->y_mode[i], INTRA_MODES, header_bc);
+
+    vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
+#if CONFIG_REF_MV
+                         counts->mv);
+#else
+                         &counts->mv);
+#endif
+    update_ext_tx_probs(cm, header_bc);
+#if CONFIG_SUPERTX
+    if (!xd->lossless[0]) update_supertx_probs(cm, header_bc);
+#endif  // CONFIG_SUPERTX
+  }
+#if CONFIG_GLOBAL_MOTION
+  write_global_motion(cpi, header_bc);
+#endif  // CONFIG_GLOBAL_MOTION
+#if CONFIG_ANS
+  ans_write_init(&header_ans, data);
+  buf_ans_flush(header_bc, &header_ans);
+  header_size = ans_write_end(&header_ans);
+  assert(header_size <= 0xffff);
+  return header_size;
+#else
+  vpx_stop_encode(header_bc);
+  assert(header_bc->pos <= 0xffff);
+  return header_bc->pos;
+#endif  // CONFIG_ANS
+}
+
+static int choose_size_bytes(uint32_t size, int spare_msbs) {
+  // Choose the number of bytes required to represent size, without
+  // using the 'spare_msbs' number of most significant bits.
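+  // For example, size = 0x1234 returns 2 whether spare_msbs is 0 or 1, since
+  // the top two bytes of the (shifted) value are still zero.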
+
+  // Make sure we will fit in 4 bytes to start with.
+  if (spare_msbs > 0 && size >> (32 - spare_msbs) != 0) return -1;
+
+  // Normalise to 32 bits
+  size <<= spare_msbs;
+
+  if (size >> 24 != 0)
+    return 4;
+  else if (size >> 16 != 0)
+    return 3;
+  else if (size >> 8 != 0)
+    return 2;
+  else
+    return 1;
+}
+
+static void mem_put_varsize(uint8_t *const dst, const int sz, const int val) {
+  switch (sz) {
+    case 1: dst[0] = (uint8_t)(val & 0xff); break;
+    case 2: mem_put_le16(dst, val); break;
+    case 3: mem_put_le24(dst, val); break;
+    case 4: mem_put_le32(dst, val); break;
+    default: assert("Invalid size" && 0); break;
+  }
+}
+
+static int remux_tiles(const VP10_COMMON *const cm, uint8_t *dst,
+                       const uint32_t data_size, const uint32_t max_tile_size,
+                       const uint32_t max_tile_col_size,
+                       int *const tile_size_bytes,
+                       int *const tile_col_size_bytes) {
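+  // write_tiles emits every tile (column) size as a full 4 byte field; this
+  // pass compacts those fields in place to tsb / tcsb bytes and returns the
+  // new total data size.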
+// Choose the tile size bytes (tsb) and tile column size bytes (tcsb)
+#if CONFIG_EXT_TILE
+  // The top bit in the tile size field indicates tile copy mode, so we
+  // have 1 less bit to code the tile size
+  const int tsb = choose_size_bytes(max_tile_size, 1);
+  const int tcsb = choose_size_bytes(max_tile_col_size, 0);
+#else
+  const int tsb = choose_size_bytes(max_tile_size, 0);
+  const int tcsb = 4;  // This is ignored
+  (void)max_tile_col_size;
+#endif  // CONFIG_EXT_TILE
+
+  assert(tsb > 0);
+  assert(tcsb > 0);
+
+  *tile_size_bytes = tsb;
+  *tile_col_size_bytes = tcsb;
+
+  if (tsb == 4 && tcsb == 4) {
+    return data_size;
+  } else {
+    uint32_t wpos = 0;
+    uint32_t rpos = 0;
+
+#if CONFIG_EXT_TILE
+    int tile_row;
+    int tile_col;
+
+    for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
+      // All but the last tile column have a column header
+      if (tile_col < cm->tile_cols - 1) {
+        uint32_t tile_col_size = mem_get_le32(dst + rpos);
+        rpos += 4;
+
+        // Adjust the tile column size by the number of bytes removed
+        // from the tile size fields.
+        tile_col_size -= (4 - tsb) * cm->tile_rows;
+
+        mem_put_varsize(dst + wpos, tcsb, tile_col_size);
+        wpos += tcsb;
+      }
+
+      for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
+        // All rows, including the last one, have a header
+        uint32_t tile_header = mem_get_le32(dst + rpos);
+        rpos += 4;
+
+        // If this is a copy tile, we need to shift the MSB to the
+        // top bit of the new size field, and there is no data to copy.
+        if (tile_header >> 31 != 0) {
+          if (tsb < 4) tile_header >>= 32 - 8 * tsb;
+          mem_put_varsize(dst + wpos, tsb, tile_header);
+          wpos += tsb;
+        } else {
+          mem_put_varsize(dst + wpos, tsb, tile_header);
+          wpos += tsb;
+
+          memmove(dst + wpos, dst + rpos, tile_header);
+          rpos += tile_header;
+          wpos += tile_header;
+        }
+      }
+    }
+#else
+    const int n_tiles = cm->tile_cols * cm->tile_rows;
+    int n;
+
+    for (n = 0; n < n_tiles; n++) {
+      int tile_size;
+
+      if (n == n_tiles - 1) {
+        tile_size = data_size - rpos;
+      } else {
+        tile_size = mem_get_le32(dst + rpos);
+        rpos += 4;
+        mem_put_varsize(dst + wpos, tsb, tile_size);
+        wpos += tsb;
+      }
+
+      memmove(dst + wpos, dst + rpos, tile_size);
+
+      rpos += tile_size;
+      wpos += tile_size;
+    }
+#endif  // CONFIG_EXT_TILE
+
+    assert(rpos > wpos);
+    assert(rpos == data_size);
+
+    return wpos;
+  }
+}
+
+void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
+  uint8_t *data = dst;
+  uint32_t compressed_header_size;
+  uint32_t uncompressed_header_size;
+  uint32_t data_size;
+  struct vpx_write_bit_buffer wb = { data, 0 };
+  struct vpx_write_bit_buffer saved_wb;
+  unsigned int max_tile_size;
+  unsigned int max_tile_col_size;
+  int tile_size_bytes;
+  int tile_col_size_bytes;
+
+  VP10_COMMON *const cm = &cpi->common;
+  const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
+
+#if CONFIG_BITSTREAM_DEBUG
+  bitstream_queue_reset_write();
+#endif
+
+  // Write the uncompressed header
+  write_uncompressed_header(cpi, &wb);
+
+#if CONFIG_EXT_REFS
+  if (cm->show_existing_frame) {
+    *size = vpx_wb_bytes_written(&wb);
+    return;
+  }
+#endif  // CONFIG_EXT_REFS
+
+  // We do not know these sizes in advance, so output placeholder bits.
+  saved_wb = wb;
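+  // saved_wb records the current write position so that the placeholder
+  // fields below can be overwritten once the real sizes are known.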
+  // Write tile size magnitudes
+  if (have_tiles) {
+// Note that the last item in the uncompressed header is the data
+// describing tile configuration.
+#if CONFIG_EXT_TILE
+    // Number of bytes in tile column size - 1
+    vpx_wb_write_literal(&wb, 0, 2);
+#endif  // CONFIG_EXT_TILE
+    // Number of bytes in tile size - 1
+    vpx_wb_write_literal(&wb, 0, 2);
+  }
+  // Size of compressed header
+  vpx_wb_write_literal(&wb, 0, 16);
+
+  uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
+  data += uncompressed_header_size;
+
+  vpx_clear_system_state();
+
+  // Write the compressed header
+  compressed_header_size = write_compressed_header(cpi, data);
+  data += compressed_header_size;
+
+  // Write the encoded tile data
+  data_size = write_tiles(cpi, data, &max_tile_size, &max_tile_col_size);
+
+  if (have_tiles) {
+    data_size =
+        remux_tiles(cm, data, data_size, max_tile_size, max_tile_col_size,
+                    &tile_size_bytes, &tile_col_size_bytes);
+  }
+
+  data += data_size;
+
+  // Now fill in the gaps in the uncompressed header.
+  if (have_tiles) {
+#if CONFIG_EXT_TILE
+    assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
+    vpx_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
+#endif  // CONFIG_EXT_TILE
+    assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
+    vpx_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
+  }
+  // TODO(jbb): Figure out what to do if compressed_header_size > 16 bits.
+  assert(compressed_header_size <= 0xffff);
+  vpx_wb_write_literal(&saved_wb, compressed_header_size, 16);
+
+  *size = data - dst;
+}
diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h
new file mode 100644
index 0000000..01d2c8d
--- /dev/null
+++ b/av1/encoder/bitstream.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_BITSTREAM_H_
+#define VP10_ENCODER_BITSTREAM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "av1/encoder/encoder.h"
+
+void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+
+void vp10_encode_token_init(void);
+
+static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
+#if CONFIG_EXT_REFS
+  // Do not swap gf and arf indices for internal overlay frames
+  return !cpi->multi_arf_allowed && cpi->rc.is_src_frame_alt_ref &&
+         !cpi->rc.is_src_frame_ext_arf;
+#else
+  return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
+         cpi->rc.is_src_frame_alt_ref;
+#endif  // CONFIG_EXT_REFS
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_BITSTREAM_H_
diff --git a/av1/encoder/bitwriter.h b/av1/encoder/bitwriter.h
new file mode 100644
index 0000000..8cc674b
--- /dev/null
+++ b/av1/encoder/bitwriter.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/* The purpose of this header is to provide compile-time pluggable bit writer
+ * implementations with a common interface. */
+
+#ifndef VP10_ENCODER_BITWRITER_H_
+#define VP10_ENCODER_BITWRITER_H_
+
+#include "./vpx_config.h"
+#include "aom_dsp/prob.h"
+
+#if CONFIG_ANS
+typedef struct BufAnsCoder BufAnsCoder;
+#include "av1/encoder/buf_ans.h"
+#define vp10_writer BufAnsCoder
+#define vp10_write buf_uabs_write
+#define vp10_write_bit buf_uabs_write_bit
+#define vp10_write_literal buf_uabs_write_literal
+#else
+#include "aom_dsp/bitwriter.h"
+#define vp10_writer vpx_writer
+#define vp10_write vpx_write
+#define vp10_write_bit vpx_write_bit
+#define vp10_write_literal vpx_write_literal
+#endif
+
+#endif  // VP10_ENCODER_BITWRITER_H_
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
new file mode 100644
index 0000000..65bb1e2
--- /dev/null
+++ b/av1/encoder/block.h
@@ -0,0 +1,182 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_BLOCK_H_
+#define VP10_ENCODER_BLOCK_H_
+
+#include "av1/common/entropymv.h"
+#include "av1/common/entropy.h"
+#if CONFIG_REF_MV
+#include "av1/common/mvref_common.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+  unsigned int sse;
+  int sum;
+  unsigned int var;
+} diff;
+
+typedef struct macroblock_plane {
+  DECLARE_ALIGNED(16, int16_t, src_diff[MAX_SB_SQUARE]);
+  tran_low_t *qcoeff;
+  tran_low_t *coeff;
+  uint16_t *eobs;
+  struct buf_2d src;
+
+  // Quantizer settings
+  const int16_t *quant_fp;
+  const int16_t *round_fp;
+  const int16_t *quant;
+  const int16_t *quant_shift;
+  const int16_t *zbin;
+  const int16_t *round;
+#if CONFIG_NEW_QUANT
+  const cuml_bins_type_nuq *cuml_bins_nuq[QUANT_PROFILES];
+#endif  // CONFIG_NEW_QUANT
+
+  int64_t quant_thred[2];
+} MACROBLOCK_PLANE;
+
+/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
+ * coefficient in this block was zero) or not. */
+typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
+                                    [COEFF_CONTEXTS][ENTROPY_TOKENS];
+
+typedef struct {
+  int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+  int16_t mode_context[MODE_CTX_REF_FRAMES];
+#if CONFIG_REF_MV
+  uint8_t ref_mv_count[MODE_CTX_REF_FRAMES];
+  CANDIDATE_MV ref_mv_stack[MODE_CTX_REF_FRAMES][MAX_REF_MV_STACK_SIZE];
+#if CONFIG_EXT_INTER
+  int16_t compound_mode_context[MODE_CTX_REF_FRAMES];
+#endif  // CONFIG_EXT_INTER
+#endif
+} MB_MODE_INFO_EXT;
+
+typedef struct {
+  uint8_t best_palette_color_map[MAX_SB_SQUARE];
+  float kmeans_data_buf[2 * MAX_SB_SQUARE];
+} PALETTE_BUFFER;
+
+typedef struct macroblock MACROBLOCK;
+struct macroblock {
+  struct macroblock_plane plane[MAX_MB_PLANE];
+
+  MACROBLOCKD e_mbd;
+  MB_MODE_INFO_EXT *mbmi_ext;
+  int skip_block;
+  int select_tx_size;
+  int skip_optimize;
+  int q_index;
+
+  // The equivalent error at the current rdmult of one whole bit (not one
+  // bitcost unit).
+  int errorperbit;
+  // The equivalent SAD error of one (whole) bit at the current quantizer
+  // for large blocks.
+  int sadperbit16;
+  // The equivalent SAD error of one (whole) bit at the current quantizer
+  // for sub-8x8 blocks.
+  int sadperbit4;
+  int rddiv;
+  int rdmult;
+  int mb_energy;
+  int *m_search_count_ptr;
+  int *ex_search_count_ptr;
+
+  // These are set to their default values at the beginning, and then adjusted
+  // further in the encoding process.
+  BLOCK_SIZE min_partition_size;
+  BLOCK_SIZE max_partition_size;
+
+  int mv_best_ref_index[TOTAL_REFS_PER_FRAME];
+  unsigned int max_mv_context[TOTAL_REFS_PER_FRAME];
+  unsigned int source_variance;
+  unsigned int recon_variance;
+  unsigned int pred_sse[TOTAL_REFS_PER_FRAME];
+  int pred_mv_sad[TOTAL_REFS_PER_FRAME];
+
+#if CONFIG_REF_MV
+  int *nmvjointcost;
+  int nmv_vec_cost[NMV_CONTEXTS][MV_JOINTS];
+  int *nmvcost[NMV_CONTEXTS][2];
+  int *nmvcost_hp[NMV_CONTEXTS][2];
+  int **mv_cost_stack[NMV_CONTEXTS];
+  int *nmvjointsadcost;
+  int zero_rmv_cost[NMV_CONTEXTS][2];
+  int comp_rmv_cost[2];
+#else
+  int nmvjointcost[MV_JOINTS];
+  int *nmvcost[2];
+  int *nmvcost_hp[2];
+  int nmvjointsadcost[MV_JOINTS];
+#endif
+
+  int **mvcost;
+  int *nmvsadcost[2];
+  int *nmvsadcost_hp[2];
+  int **mvsadcost;
+
+  PALETTE_BUFFER *palette_buffer;
+
+  // These define limits to motion vector components to prevent them
+  // from extending outside the UMV borders
+  int mv_col_min;
+  int mv_col_max;
+  int mv_row_min;
+  int mv_row_max;
+
+#if CONFIG_VAR_TX
+  uint8_t blk_skip[MAX_MB_PLANE][MAX_MIB_SIZE * MAX_MIB_SIZE * 4];
+#if CONFIG_REF_MV
+  uint8_t blk_skip_drl[MAX_MB_PLANE][MAX_MIB_SIZE * MAX_MIB_SIZE * 4];
+#endif
+#endif
+
+  int skip;
+
+  int encode_breakout;
+
+  // note that token_costs is the cost when eob node is skipped
+  vp10_coeff_cost token_costs[TX_SIZES];
+
+  int optimize;
+
+  // Indicates whether we are in the rd search loop or the encoding process
+  int use_lp32x32fdct;
+
+  // Used to store the sub-partitions' choices.
+  MV pred_mv[TOTAL_REFS_PER_FRAME];
+
+  // Store the best motion vector during motion search
+  int_mv best_mv;
+  // Store the second best motion vector during full-pixel motion search
+  int_mv second_best_mv;
+
+  // Strong color activity detection. Used in RTC coding mode to enhance
+  // the visual quality at the boundary of moving color objects.
+  uint8_t color_sensitivity[2];
+
+  // use default transform and skip transform type search for intra modes
+  int use_default_intra_tx_type;
+  // use default transform and skip transform type search for inter modes
+  int use_default_inter_tx_type;
+};
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_BLOCK_H_
diff --git a/av1/encoder/blockiness.c b/av1/encoder/blockiness.c
new file mode 100644
index 0000000..97e201a
--- /dev/null
+++ b/av1/encoder/blockiness.c
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "av1/common/common.h"
+#include "av1/common/filter.h"
+#include "aom/vpx_integer.h"
+#include "aom_dsp/vpx_convolve.h"
+#include "aom_dsp/vpx_filter.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+
+static int horizontal_filter(const uint8_t *s) {
+  return (s[1] - s[-2]) * 2 + (s[-1] - s[0]) * 6;
+}
+
+static int vertical_filter(const uint8_t *s, int p) {
+  return (s[p] - s[-2 * p]) * 2 + (s[-p] - s[0]) * 6;
+}
+
+static int variance(int sum, int sum_squared, int size) {
+  return sum_squared / size - (sum / size) * (sum / size);
+}
+
+// Calculate a blockiness level for a vertical block edge.
+// This function returns a new blockiness metric that's defined as
+
+//              p0 p1 p2 p3
+//              q0 q1 q2 q3
+// block edge ->
+//              r0 r1 r2 r3
+//              s0 s1 s2 s3
+
+// blockiness =  p0*-2+q0*6+r0*-6+s0*2 +
+//               p1*-2+q1*6+r1*-6+s1*2 +
+//               p2*-2+q2*6+r2*-6+s2*2 +
+//               p3*-2+q3*6+r3*-6+s3*2 ;
+
+// reconstructed_blockiness = max(abs(blockiness from reconstructed buffer) -
+//                                abs(blockiness from source buffer), 0)
+//
+// I make the assumption that flat blocks are much more visible than high
+// contrast blocks. As such, I scale the result of the blockiness calc
+// by dividing the blockiness by the variance of the pixels on either side
+// of the edge as follows:
+// var_0 = (q0^2+q1^2+q2^2+q3^2)/4 - ((q0 + q1 + q2 + q3) / 4 )^2
+// var_1 = (r0^2+r1^2+r2^2+r3^2)/4 - ((r0 + r1 + r2 + r3) / 4 )^2
+// The returned blockiness is the scaled value:
+// reconstructed_blockiness / (1 + var_0 + var_1)
+static int blockiness_vertical(const uint8_t *s, int sp, const uint8_t *r,
+                               int rp, int size) {
+  int s_blockiness = 0;
+  int r_blockiness = 0;
+  int sum_0 = 0;
+  int sum_sq_0 = 0;
+  int sum_1 = 0;
+  int sum_sq_1 = 0;
+  int i;
+  int var_0;
+  int var_1;
+  for (i = 0; i < size; ++i, s += sp, r += rp) {
+    s_blockiness += horizontal_filter(s);
+    r_blockiness += horizontal_filter(r);
+    sum_0 += s[0];
+    sum_sq_0 += s[0] * s[0];
+    sum_1 += s[-1];
+    sum_sq_1 += s[-1] * s[-1];
+  }
+  var_0 = variance(sum_0, sum_sq_0, size);
+  var_1 = variance(sum_1, sum_sq_1, size);
+  r_blockiness = abs(r_blockiness);
+  s_blockiness = abs(s_blockiness);
+
+  if (r_blockiness > s_blockiness)
+    return (r_blockiness - s_blockiness) / (1 + var_0 + var_1);
+  else
+    return 0;
+}
+
+// Calculate a blockiness level for a horizontal block edge,
+// using the same approach as above.
+static int blockiness_horizontal(const uint8_t *s, int sp, const uint8_t *r,
+                                 int rp, int size) {
+  int s_blockiness = 0;
+  int r_blockiness = 0;
+  int sum_0 = 0;
+  int sum_sq_0 = 0;
+  int sum_1 = 0;
+  int sum_sq_1 = 0;
+  int i;
+  int var_0;
+  int var_1;
+  for (i = 0; i < size; ++i, ++s, ++r) {
+    s_blockiness += vertical_filter(s, sp);
+    r_blockiness += vertical_filter(r, rp);
+    sum_0 += s[0];
+    sum_sq_0 += s[0] * s[0];
+    sum_1 += s[-sp];
+    sum_sq_1 += s[-sp] * s[-sp];
+  }
+  var_0 = variance(sum_0, sum_sq_0, size);
+  var_1 = variance(sum_1, sum_sq_1, size);
+  r_blockiness = abs(r_blockiness);
+  s_blockiness = abs(s_blockiness);
+
+  if (r_blockiness > s_blockiness)
+    return (r_blockiness - s_blockiness) / (1 + var_0 + var_1);
+  else
+    return 0;
+}
+
+// This function returns the blockiness for the entire frame currently by
+// looking at all borders in steps of 4.
+double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+                           const unsigned char *img2, int img2_pitch, int width,
+                           int height) {
+  double blockiness = 0;
+  int i, j;
+  vpx_clear_system_state();
+  for (i = 0; i < height;
+       i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
+    for (j = 0; j < width; j += 4) {
+      if (i > 0 && i < height && j > 0 && j < width) {
+        blockiness +=
+            blockiness_vertical(img1 + j, img1_pitch, img2 + j, img2_pitch, 4);
+        blockiness += blockiness_horizontal(img1 + j, img1_pitch, img2 + j,
+                                            img2_pitch, 4);
+      }
+    }
+  }
+  blockiness /= width * height / 16;
+  return blockiness;
+}
diff --git a/av1/encoder/buf_ans.c b/av1/encoder/buf_ans.c
new file mode 100644
index 0000000..f87c1e1
--- /dev/null
+++ b/av1/encoder/buf_ans.c
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "av1/common/common.h"
+#include "av1/encoder/buf_ans.h"
+#include "av1/encoder/encoder.h"
+#include "aom_mem/vpx_mem.h"
+
+void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
+                        int size_hint) {
+  c->cm = cm;
+  c->size = size_hint;
+  CHECK_MEM_ERROR(cm, c->buf, vpx_malloc(c->size * sizeof(*c->buf)));
+  // Initialize to overfull to trigger the assert in write.
+  c->offset = c->size + 1;
+}
+
+void vp10_buf_ans_free(struct BufAnsCoder *c) {
+  vpx_free(c->buf);
+  c->buf = NULL;
+  c->size = 0;
+}
+
+void vp10_buf_ans_grow(struct BufAnsCoder *c) {
+  struct buffered_ans_symbol *new_buf = NULL;
+  int new_size = c->size * 2;
+  CHECK_MEM_ERROR(c->cm, new_buf, vpx_malloc(new_size * sizeof(*new_buf)));
+  memcpy(new_buf, c->buf, c->size * sizeof(*c->buf));
+  vpx_free(c->buf);
+  c->buf = new_buf;
+  c->size = new_size;
+}
diff --git a/av1/encoder/buf_ans.h b/av1/encoder/buf_ans.h
new file mode 100644
index 0000000..8a88c32
--- /dev/null
+++ b/av1/encoder/buf_ans.h
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_BUF_ANS_H_
+#define VP10_ENCODER_BUF_ANS_H_
+// Buffered forward ANS writer.
+// Symbols are written to the writer in forward (decode) order and serialized
+// backwards due to ANS's stack-like behavior.
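+//
+// Typical usage (see write_compressed_header): buf_ans_write_reset(),
+// a sequence of buf_uabs_write() / buf_rans_write() calls, then
+// ans_write_init() followed by buf_ans_flush() and ans_write_end().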
+
+#include <assert.h>
+#include "./vpx_config.h"
+#include "aom/vpx_integer.h"
+#include "av1/common/ans.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+#define ANS_METHOD_UABS 0
+#define ANS_METHOD_RANS 1
+
+struct buffered_ans_symbol {
+  uint8_t method;  // one of ANS_METHOD_UABS or ANS_METHOD_RANS
+  // TODO(aconverse): Should be possible to write this in terms of start for ABS
+  AnsP10 val_start;  // Boolean value for ABS, start in symbol cycle for Rans
+  AnsP10 prob;       // Probability of this symbol
+};
+
+struct BufAnsCoder {
+  struct VP10Common *cm;
+  struct buffered_ans_symbol *buf;
+  int size;
+  int offset;
+};
+
+void vp10_buf_ans_alloc(struct BufAnsCoder *c, struct VP10Common *cm,
+                        int size_hint);
+
+void vp10_buf_ans_free(struct BufAnsCoder *c);
+
+void vp10_buf_ans_grow(struct BufAnsCoder *c);
+
+static INLINE void buf_ans_write_reset(struct BufAnsCoder *const c) {
+  c->offset = 0;
+}
+
+static INLINE void buf_uabs_write(struct BufAnsCoder *const c, uint8_t val,
+                                  AnsP8 prob) {
+  assert(c->offset <= c->size);
+  if (c->offset == c->size) {
+    vp10_buf_ans_grow(c);
+  }
+  c->buf[c->offset].method = ANS_METHOD_UABS;
+  c->buf[c->offset].val_start = val;
+  c->buf[c->offset].prob = prob;
+  ++c->offset;
+}
+
+static INLINE void buf_rans_write(struct BufAnsCoder *const c,
+                                  const struct rans_sym *const sym) {
+  assert(c->offset <= c->size);
+  if (c->offset == c->size) {
+    vp10_buf_ans_grow(c);
+  }
+  c->buf[c->offset].method = ANS_METHOD_RANS;
+  c->buf[c->offset].val_start = sym->cum_prob;
+  c->buf[c->offset].prob = sym->prob;
+  ++c->offset;
+}
+
+static INLINE void buf_ans_flush(const struct BufAnsCoder *const c,
+                                 struct AnsCoder *ans) {
+  int offset;
+  for (offset = c->offset - 1; offset >= 0; --offset) {
+    if (c->buf[offset].method == ANS_METHOD_RANS) {
+      struct rans_sym sym;
+      sym.prob = c->buf[offset].prob;
+      sym.cum_prob = c->buf[offset].val_start;
+      rans_write(ans, &sym);
+    } else {
+      uabs_write(ans, (uint8_t)c->buf[offset].val_start,
+                 (AnsP8)c->buf[offset].prob);
+    }
+  }
+}
+
+static INLINE void buf_uabs_write_bit(struct BufAnsCoder *c, int bit) {
+  buf_uabs_write(c, bit, 128);
+}
+
+static INLINE void buf_uabs_write_literal(struct BufAnsCoder *c, int literal,
+                                          int bits) {
+  int bit;
+
+  assert(bits < 31);
+  for (bit = bits - 1; bit >= 0; bit--)
+    buf_uabs_write_bit(c, 1 & (literal >> bit));
+}
+#ifdef __cplusplus
+}  // extern "C"
+#endif  // __cplusplus
+#endif  // VP10_ENCODER_BUF_ANS_H_
diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c
new file mode 100644
index 0000000..9346e1c
--- /dev/null
+++ b/av1/encoder/context_tree.c
@@ -0,0 +1,271 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "av1/encoder/context_tree.h"
+#include "av1/encoder/encoder.h"
+
+static const BLOCK_SIZE square[MAX_SB_SIZE_LOG2 - 2] = {
+  BLOCK_8X8,     BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
+#if CONFIG_EXT_PARTITION
+  BLOCK_128X128,
+#endif  // CONFIG_EXT_PARTITION
+};
+
+static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
+#if CONFIG_EXT_PARTITION_TYPES
+                               PARTITION_TYPE partition,
+#endif
+                               PICK_MODE_CONTEXT *ctx) {
+  const int num_blk = (num_4x4_blk < 4 ? 4 : num_4x4_blk);
+  const int num_pix = num_blk << 4;
+  int i, k;
+  ctx->num_4x4_blk = num_blk;
+#if CONFIG_EXT_PARTITION_TYPES
+  ctx->partition = partition;
+#endif
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+#if CONFIG_VAR_TX
+    CHECK_MEM_ERROR(cm, ctx->blk_skip[i], vpx_calloc(num_blk, sizeof(uint8_t)));
+#endif
+    for (k = 0; k < 3; ++k) {
+      CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
+                      vpx_memalign(32, num_pix * sizeof(*ctx->coeff[i][k])));
+      CHECK_MEM_ERROR(cm, ctx->qcoeff[i][k],
+                      vpx_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k])));
+      CHECK_MEM_ERROR(cm, ctx->dqcoeff[i][k],
+                      vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
+      CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
+                      vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
+    }
+  }
+
+  if (cm->allow_screen_content_tools) {
+    for (i = 0; i < 2; ++i) {
+      CHECK_MEM_ERROR(
+          cm, ctx->color_index_map[i],
+          vpx_memalign(32, num_pix * sizeof(*ctx->color_index_map[i])));
+    }
+  }
+}
+
+static void free_mode_context(PICK_MODE_CONTEXT *ctx) {
+  int i, k;
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+#if CONFIG_VAR_TX
+    vpx_free(ctx->blk_skip[i]);
+    ctx->blk_skip[i] = 0;
+#endif
+    for (k = 0; k < 3; ++k) {
+      vpx_free(ctx->coeff[i][k]);
+      ctx->coeff[i][k] = 0;
+      vpx_free(ctx->qcoeff[i][k]);
+      ctx->qcoeff[i][k] = 0;
+      vpx_free(ctx->dqcoeff[i][k]);
+      ctx->dqcoeff[i][k] = 0;
+      vpx_free(ctx->eobs[i][k]);
+      ctx->eobs[i][k] = 0;
+    }
+  }
+
+  for (i = 0; i < 2; ++i) {
+    vpx_free(ctx->color_index_map[i]);
+    ctx->color_index_map[i] = 0;
+  }
+}
+
+static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
+                                int num_4x4_blk) {
+#if CONFIG_EXT_PARTITION_TYPES
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_NONE, &tree->none);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_HORZ, &tree->horizontal[0]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_VERT, &tree->vertical[0]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_HORZ, &tree->horizontal[1]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_VERT, &tree->vertical[1]);
+
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_HORZ_A,
+                     &tree->horizontala[0]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_HORZ_A,
+                     &tree->horizontala[1]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_HORZ_A,
+                     &tree->horizontala[2]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_HORZ_B,
+                     &tree->horizontalb[0]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_HORZ_B,
+                     &tree->horizontalb[1]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_HORZ_B,
+                     &tree->horizontalb[2]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_VERT_A,
+                     &tree->verticala[0]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_VERT_A,
+                     &tree->verticala[1]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_VERT_A,
+                     &tree->verticala[2]);
+  alloc_mode_context(cm, num_4x4_blk / 2, PARTITION_VERT_B,
+                     &tree->verticalb[0]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_VERT_B,
+                     &tree->verticalb[1]);
+  alloc_mode_context(cm, num_4x4_blk / 4, PARTITION_VERT_B,
+                     &tree->verticalb[2]);
+#if CONFIG_SUPERTX
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_HORZ,
+                     &tree->horizontal_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_VERT, &tree->vertical_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_SPLIT, &tree->split_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_HORZ_A,
+                     &tree->horizontala_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_HORZ_B,
+                     &tree->horizontalb_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_VERT_A,
+                     &tree->verticala_supertx);
+  alloc_mode_context(cm, num_4x4_blk, PARTITION_VERT_B,
+                     &tree->verticalb_supertx);
+#endif  // CONFIG_SUPERTX
+#else
+  alloc_mode_context(cm, num_4x4_blk, &tree->none);
+  alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
+  alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[0]);
+#if CONFIG_SUPERTX
+  alloc_mode_context(cm, num_4x4_blk, &tree->horizontal_supertx);
+  alloc_mode_context(cm, num_4x4_blk, &tree->vertical_supertx);
+  alloc_mode_context(cm, num_4x4_blk, &tree->split_supertx);
+#endif
+
+  if (num_4x4_blk > 4) {
+    alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[1]);
+    alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[1]);
+  } else {
+    memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1]));
+    memset(&tree->vertical[1], 0, sizeof(tree->vertical[1]));
+  }
+#endif  // CONFIG_EXT_PARTITION_TYPES
+}
+
+static void free_tree_contexts(PC_TREE *tree) {
+#if CONFIG_EXT_PARTITION_TYPES
+  int i;
+  for (i = 0; i < 3; i++) {
+    free_mode_context(&tree->horizontala[i]);
+    free_mode_context(&tree->horizontalb[i]);
+    free_mode_context(&tree->verticala[i]);
+    free_mode_context(&tree->verticalb[i]);
+  }
+#endif  // CONFIG_EXT_PARTITION_TYPES
+  free_mode_context(&tree->none);
+  free_mode_context(&tree->horizontal[0]);
+  free_mode_context(&tree->horizontal[1]);
+  free_mode_context(&tree->vertical[0]);
+  free_mode_context(&tree->vertical[1]);
+#if CONFIG_SUPERTX
+  free_mode_context(&tree->horizontal_supertx);
+  free_mode_context(&tree->vertical_supertx);
+  free_mode_context(&tree->split_supertx);
+#if CONFIG_EXT_PARTITION_TYPES
+  free_mode_context(&tree->horizontala_supertx);
+  free_mode_context(&tree->horizontalb_supertx);
+  free_mode_context(&tree->verticala_supertx);
+  free_mode_context(&tree->verticalb_supertx);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+#endif  // CONFIG_SUPERTX
+}
+
+// This function sets up a tree of contexts such that at each square
+// partition level there are contexts for none, horizontal, vertical, and
+// split, along with a block_size value and a selected block_size that
+// represents the state of our search.
+void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
+  int i, j;
+#if CONFIG_EXT_PARTITION
+  const int leaf_nodes = 256;
+  const int tree_nodes = 256 + 64 + 16 + 4 + 1;
+#else
+  const int leaf_nodes = 64;
+  const int tree_nodes = 64 + 16 + 4 + 1;
+#endif  // CONFIG_EXT_PARTITION
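+  // tree_nodes counts one PC_TREE node per square block at every level of
+  // the superblock, i.e. the geometric sum 1 + 4 + 16 + ... down to the
+  // number of 8x8 leaf nodes.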
+  int pc_tree_index = 0;
+  PC_TREE *this_pc;
+  PICK_MODE_CONTEXT *this_leaf;
+  int square_index = 1;
+  int nodes;
+
+  vpx_free(td->leaf_tree);
+  CHECK_MEM_ERROR(cm, td->leaf_tree,
+                  vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
+  vpx_free(td->pc_tree);
+  CHECK_MEM_ERROR(cm, td->pc_tree,
+                  vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
+
+  this_pc = &td->pc_tree[0];
+  this_leaf = &td->leaf_tree[0];
+
+  // Sub-8x8 blocks within the same 8x8 block share the same context, so we
+  // only need to allocate one context for each 8x8 block.
+  for (i = 0; i < leaf_nodes; ++i) {
+#if CONFIG_EXT_PARTITION_TYPES
+    alloc_mode_context(cm, 1, PARTITION_NONE, &td->leaf_tree[i]);
+#else
+    alloc_mode_context(cm, 1, &td->leaf_tree[i]);
+#endif
+  }
+
+  // Sets up all the leaf nodes in the tree.
+  for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) {
+    PC_TREE *const tree = &td->pc_tree[pc_tree_index];
+    tree->block_size = square[0];
+    alloc_tree_contexts(cm, tree, 4);
+    tree->leaf_split[0] = this_leaf++;
+    for (j = 1; j < 4; j++) tree->leaf_split[j] = tree->leaf_split[0];
+  }
+
+  // Each node has 4 child nodes; fill each block_size level of the tree
+  // from the leaves up to the root.
+  for (nodes = leaf_nodes >> 2; nodes > 0; nodes >>= 2) {
+    for (i = 0; i < nodes; ++i) {
+      PC_TREE *const tree = &td->pc_tree[pc_tree_index];
+      alloc_tree_contexts(cm, tree, 4 << (2 * square_index));
+      tree->block_size = square[square_index];
+      for (j = 0; j < 4; j++) tree->split[j] = this_pc++;
+      ++pc_tree_index;
+    }
+    ++square_index;
+  }
+
+  // Set up the root node for the largest superblock size
+  i = MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2;
+  td->pc_root[i] = &td->pc_tree[tree_nodes - 1];
+  td->pc_root[i]->none.best_mode_index = 2;
+  // Set up the root nodes for the rest of the possible superblock sizes
+  while (--i >= 0) {
+    td->pc_root[i] = td->pc_root[i + 1]->split[0];
+    td->pc_root[i]->none.best_mode_index = 2;
+  }
+}
+
+void vp10_free_pc_tree(ThreadData *td) {
+#if CONFIG_EXT_PARTITION
+  const int leaf_nodes = 256;
+  const int tree_nodes = 256 + 64 + 16 + 4 + 1;
+#else
+  const int leaf_nodes = 64;
+  const int tree_nodes = 64 + 16 + 4 + 1;
+#endif  // CONFIG_EXT_PARTITION
+  int i;
+
+  // Free all the leaf (4x4) mode contexts
+  for (i = 0; i < leaf_nodes; ++i) free_mode_context(&td->leaf_tree[i]);
+
+  // Free the contexts in every tree node.
+  for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
+
+  vpx_free(td->pc_tree);
+  td->pc_tree = NULL;
+  vpx_free(td->leaf_tree);
+  td->leaf_tree = NULL;
+}
diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h
new file mode 100644
index 0000000..18f00bb
--- /dev/null
+++ b/av1/encoder/context_tree.h
@@ -0,0 +1,103 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_CONTEXT_TREE_H_
+#define VP10_ENCODER_CONTEXT_TREE_H_
+
+#include "av1/common/blockd.h"
+#include "av1/encoder/block.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP10_COMP;
+struct VP10Common;
+struct ThreadData;
+
+// Structure to hold a snapshot of the coding context during the mode picking
+// process
+typedef struct {
+  MODE_INFO mic;
+  MB_MODE_INFO_EXT mbmi_ext;
+  uint8_t *color_index_map[2];
+#if CONFIG_VAR_TX
+  uint8_t *blk_skip[MAX_MB_PLANE];
+#endif
+
+  // dual buffer pointers, 0: in use, 1: best in store
+  tran_low_t *coeff[MAX_MB_PLANE][3];
+  tran_low_t *qcoeff[MAX_MB_PLANE][3];
+  tran_low_t *dqcoeff[MAX_MB_PLANE][3];
+  uint16_t *eobs[MAX_MB_PLANE][3];
+
+  int is_coded;
+  int num_4x4_blk;
+  int skip;
+  int pred_pixel_ready;
+  // For the current partition, skippable is set to 1 only if all Y, U, and V
+  // transform blocks' coefficients are quantized to 0.
+  int skippable;
+  int best_mode_index;
+  int hybrid_pred_diff;
+  int comp_pred_diff;
+  int single_pred_diff;
+
+  // TODO(jingning) Use RD_COST struct here instead. This involves a broader
+  // scope of refactoring.
+  int rate;
+  int64_t dist;
+
+  // motion vector cache for adaptive motion search control in partition
+  // search loop
+  MV pred_mv[TOTAL_REFS_PER_FRAME];
+  INTERP_FILTER pred_interp_filter;
+#if CONFIG_EXT_PARTITION_TYPES
+  PARTITION_TYPE partition;
+#endif
+} PICK_MODE_CONTEXT;
+
+typedef struct PC_TREE {
+  int index;
+  PARTITION_TYPE partitioning;
+  BLOCK_SIZE block_size;
+  PICK_MODE_CONTEXT none;
+  PICK_MODE_CONTEXT horizontal[2];
+  PICK_MODE_CONTEXT vertical[2];
+#if CONFIG_EXT_PARTITION_TYPES
+  PICK_MODE_CONTEXT horizontala[3];
+  PICK_MODE_CONTEXT horizontalb[3];
+  PICK_MODE_CONTEXT verticala[3];
+  PICK_MODE_CONTEXT verticalb[3];
+#endif
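+  // Interior nodes use split[] to point at the child PC_TREE nodes; the
+  // lowest level instead points at the shared 8x8 leaf contexts through
+  // leaf_split[].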
+  union {
+    struct PC_TREE *split[4];
+    PICK_MODE_CONTEXT *leaf_split[4];
+  };
+#if CONFIG_SUPERTX
+  PICK_MODE_CONTEXT horizontal_supertx;
+  PICK_MODE_CONTEXT vertical_supertx;
+  PICK_MODE_CONTEXT split_supertx;
+#if CONFIG_EXT_PARTITION_TYPES
+  PICK_MODE_CONTEXT horizontala_supertx;
+  PICK_MODE_CONTEXT horizontalb_supertx;
+  PICK_MODE_CONTEXT verticala_supertx;
+  PICK_MODE_CONTEXT verticalb_supertx;
+#endif
+#endif
+} PC_TREE;
+
+void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td);
+void vp10_free_pc_tree(struct ThreadData *td);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */
diff --git a/av1/encoder/corner_detect.c b/av1/encoder/corner_detect.c
new file mode 100644
index 0000000..2b2d82d
--- /dev/null
+++ b/av1/encoder/corner_detect.c
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+#include <math.h>
+#include <assert.h>
+
+#include "vp10/encoder/corner_detect.h"
+#include "third_party/fastfeat/fast.h"
+
+// Fast_9 wrapper
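+// FAST_BARRIER is the intensity difference threshold passed to the FAST-9
+// detector; higher values yield fewer, stronger corners.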
+#define FAST_BARRIER 40
+int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
+                     int *points, int max_points) {
+  int num_points;
+  xy *frm_corners_xy = fast9_detect_nonmax(buf, width, height, stride,
+                                           FAST_BARRIER, &num_points);
+  num_points = (num_points <= max_points ? num_points : max_points);
+  if (num_points > 0 && frm_corners_xy) {
+    memcpy(points, frm_corners_xy, sizeof(xy) * num_points);
+    free(frm_corners_xy);
+    return num_points;
+  } else {
+    return 0;
+  }
+}
diff --git a/av1/encoder/corner_detect.h b/av1/encoder/corner_detect.h
new file mode 100644
index 0000000..8db713e
--- /dev/null
+++ b/av1/encoder/corner_detect.h
@@ -0,0 +1,21 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_CORNER_DETECT_H_
+#define VP10_ENCODER_CORNER_DETECT_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+
+int FastCornerDetect(unsigned char *buf, int width, int height, int stride,
+                     int *points, int max_points);
+
+#endif  // VP10_ENCODER_CORNER_DETECT_H_
diff --git a/av1/encoder/corner_match.c b/av1/encoder/corner_match.c
new file mode 100644
index 0000000..6b19d5b
--- /dev/null
+++ b/av1/encoder/corner_match.c
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <math.h>
+
+#include "vp10/encoder/corner_match.h"
+
+#define MATCH_SZ 15
+#define MATCH_SZ_BY2 ((MATCH_SZ - 1) / 2)
+#define MATCH_SZ_SQ (MATCH_SZ * MATCH_SZ)
+#define SEARCH_SZ 9
+#define SEARCH_SZ_BY2 ((SEARCH_SZ - 1) / 2)
+
+#define THRESHOLD_NCC 0.80
+
+static double compute_variance(unsigned char *im, int stride, int x, int y,
+                               double *mean) {
+  double sum = 0.0;
+  double sumsq = 0.0;
+  double var;
+  int i, j;
+  for (i = 0; i < MATCH_SZ; ++i)
+    for (j = 0; j < MATCH_SZ; ++j) {
+      sum += im[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
+      sumsq += im[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)] *
+               im[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
+    }
+  var = (sumsq * MATCH_SZ_SQ - sum * sum) / (MATCH_SZ_SQ * MATCH_SZ_SQ);
+  if (mean) *mean = sum / MATCH_SZ_SQ;
+  return var;
+}
+
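+// Computes the covariance of two MATCH_SZ x MATCH_SZ patches; callers
+// normalize by the square root of the two patch variances to obtain the
+// normalized cross correlation (NCC).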
+static double compute_cross_correlation(unsigned char *im1, int stride1, int x1,
+                                        int y1, unsigned char *im2, int stride2,
+                                        int x2, int y2) {
+  double sum1 = 0;
+  double sum2 = 0;
+  double cross = 0;
+  double corr;
+  int i, j;
+  for (i = 0; i < MATCH_SZ; ++i)
+    for (j = 0; j < MATCH_SZ; ++j) {
+      sum1 += im1[(i + y1 - MATCH_SZ_BY2) * stride1 + (j + x1 - MATCH_SZ_BY2)];
+      sum2 += im2[(i + y2 - MATCH_SZ_BY2) * stride2 + (j + x2 - MATCH_SZ_BY2)];
+      cross +=
+          im1[(i + y1 - MATCH_SZ_BY2) * stride1 + (j + x1 - MATCH_SZ_BY2)] *
+          im2[(i + y2 - MATCH_SZ_BY2) * stride2 + (j + x2 - MATCH_SZ_BY2)];
+    }
+  corr = (cross * MATCH_SZ_SQ - sum1 * sum2) / (MATCH_SZ_SQ * MATCH_SZ_SQ);
+  return corr;
+}
+
+static int is_eligible_point(double pointx, double pointy, int width,
+                             int height) {
+  return (pointx >= MATCH_SZ_BY2 && pointy >= MATCH_SZ_BY2 &&
+          pointx + MATCH_SZ_BY2 < width && pointy + MATCH_SZ_BY2 < height);
+}
+
+static int is_eligible_distance(double point1x, double point1y, double point2x,
+                                double point2y, int width, int height) {
+  const int thresh = (width < height ? height : width) >> 4;
+  return ((point1x - point2x) * (point1x - point2x) +
+          (point1y - point2y) * (point1y - point2y)) <= thresh * thresh;
+}
+
+static void improve_correspondence(unsigned char *frm, unsigned char *ref,
+                                   int width, int height, int frm_stride,
+                                   int ref_stride,
+                                   correspondence *correspondences,
+                                   int num_correspondences) {
+  int i;
+  for (i = 0; i < num_correspondences; ++i) {
+    double template_norm =
+        compute_variance(frm, frm_stride, (int)correspondences[i].x,
+                         (int)correspondences[i].y, NULL);
+    int x, y, best_x = 0, best_y = 0;
+    double best_match_ncc = 0.0;
+    for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y) {
+      for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
+        double match_ncc;
+        double subimage_norm;
+        if (!is_eligible_point((int)correspondences[i].rx + x,
+                               (int)correspondences[i].ry + y, width, height))
+          continue;
+        if (!is_eligible_distance(
+                (int)correspondences[i].x, (int)correspondences[i].y,
+                (int)correspondences[i].rx + x, (int)correspondences[i].ry + y,
+                width, height))
+          continue;
+        subimage_norm =
+            compute_variance(ref, ref_stride, (int)correspondences[i].rx + x,
+                             (int)correspondences[i].ry + y, NULL);
+        match_ncc = compute_cross_correlation(
+                        frm, frm_stride, (int)correspondences[i].x,
+                        (int)correspondences[i].y, ref, ref_stride,
+                        (int)correspondences[i].rx + x,
+                        (int)correspondences[i].ry + y) /
+                    sqrt(template_norm * subimage_norm);
+        if (match_ncc > best_match_ncc) {
+          best_match_ncc = match_ncc;
+          best_y = y;
+          best_x = x;
+        }
+      }
+    }
+    correspondences[i].rx += (double)best_x;
+    correspondences[i].ry += (double)best_y;
+  }
+  for (i = 0; i < num_correspondences; ++i) {
+    double template_norm =
+        compute_variance(ref, ref_stride, (int)correspondences[i].rx,
+                         (int)correspondences[i].ry, NULL);
+    int x, y, best_x = 0, best_y = 0;
+    double best_match_ncc = 0.0;
+    for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y)
+      for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
+        double match_ncc;
+        double subimage_norm;
+        if (!is_eligible_point((int)correspondences[i].x + x,
+                               (int)correspondences[i].y + y, width, height))
+          continue;
+        if (!is_eligible_distance((int)correspondences[i].x + x,
+                                  (int)correspondences[i].y + y,
+                                  (int)correspondences[i].rx,
+                                  (int)correspondences[i].ry, width, height))
+          continue;
+        subimage_norm =
+            compute_variance(frm, frm_stride, (int)correspondences[i].x + x,
+                             (int)correspondences[i].y + y, NULL);
+        match_ncc =
+            compute_cross_correlation(
+                frm, frm_stride, (int)correspondences[i].x + x,
+                (int)correspondences[i].y + y, ref, ref_stride,
+                (int)correspondences[i].rx, (int)correspondences[i].ry) /
+            sqrt(template_norm * subimage_norm);
+        if (match_ncc > best_match_ncc) {
+          best_match_ncc = match_ncc;
+          best_y = y;
+          best_x = x;
+        }
+      }
+    correspondences[i].x += best_x;
+    correspondences[i].y += best_y;
+  }
+}
+
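+// For each corner in frm, finds the ref corner with the highest normalized
+// cross-correlation; pairs scoring above THRESHOLD_NCC are kept and then
+// refined with improve_correspondence(). Returns the number of matches.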
+int determine_correspondence(unsigned char *frm, int *frm_corners,
+                             int num_frm_corners, unsigned char *ref,
+                             int *ref_corners, int num_ref_corners, int width,
+                             int height, int frm_stride, int ref_stride,
+                             double *correspondence_pts) {
+  // TODO(sarahparker) Improve this to include 2-way match
+  int i, j;
+  correspondence *correspondences = (correspondence *)correspondence_pts;
+  int num_correspondences = 0;
+  for (i = 0; i < num_frm_corners; ++i) {
+    double best_match_ncc = 0.0;
+    double template_norm;
+    int best_match_j = -1;
+    if (!is_eligible_point(frm_corners[2 * i], frm_corners[2 * i + 1], width,
+                           height))
+      continue;
+    template_norm = compute_variance(frm, frm_stride, frm_corners[2 * i],
+                                     frm_corners[2 * i + 1], NULL);
+    for (j = 0; j < num_ref_corners; ++j) {
+      double match_ncc;
+      double subimage_norm;
+      if (!is_eligible_point(ref_corners[2 * j], ref_corners[2 * j + 1], width,
+                             height))
+        continue;
+      if (!is_eligible_distance(frm_corners[2 * i], frm_corners[2 * i + 1],
+                                ref_corners[2 * j], ref_corners[2 * j + 1],
+                                width, height))
+        continue;
+      subimage_norm = compute_variance(ref, ref_stride, ref_corners[2 * j],
+                                       ref_corners[2 * j + 1], NULL);
+      match_ncc = compute_cross_correlation(frm, frm_stride, frm_corners[2 * i],
+                                            frm_corners[2 * i + 1], ref,
+                                            ref_stride, ref_corners[2 * j],
+                                            ref_corners[2 * j + 1]) /
+                  sqrt(template_norm * subimage_norm);
+      if (match_ncc > best_match_ncc) {
+        best_match_ncc = match_ncc;
+        best_match_j = j;
+      }
+    }
+    if (best_match_ncc > THRESHOLD_NCC) {
+      correspondences[num_correspondences].x = (double)frm_corners[2 * i];
+      correspondences[num_correspondences].y = (double)frm_corners[2 * i + 1];
+      correspondences[num_correspondences].rx =
+          (double)ref_corners[2 * best_match_j];
+      correspondences[num_correspondences].ry =
+          (double)ref_corners[2 * best_match_j + 1];
+      num_correspondences++;
+    }
+  }
+  improve_correspondence(frm, ref, width, height, frm_stride, ref_stride,
+                         correspondences, num_correspondences);
+  return num_correspondences;
+}
diff --git a/av1/encoder/corner_match.h b/av1/encoder/corner_match.h
new file mode 100644
index 0000000..3bc8cb9
--- /dev/null
+++ b/av1/encoder/corner_match.h
@@ -0,0 +1,29 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_CORNER_MATCH_H_
+#define VP10_ENCODER_CORNER_MATCH_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
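+// A matched pair of points: (x, y) in the source frame and (rx, ry) in the
+// reference frame.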
+typedef struct {
+  double x, y;
+  double rx, ry;
+} correspondence;
+
+int determine_correspondence(unsigned char *frm, int *frm_corners,
+                             int num_frm_corners, unsigned char *ref,
+                             int *ref_corners, int num_ref_corners, int width,
+                             int height, int frm_stride, int ref_stride,
+                             double *correspondence_pts);
+
+#endif  // VP10_ENCODER_CORNER_MATCH_H_
diff --git a/av1/encoder/cost.c b/av1/encoder/cost.c
new file mode 100644
index 0000000..4542638
--- /dev/null
+++ b/av1/encoder/cost.c
@@ -0,0 +1,168 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <assert.h>
+
+#include "av1/encoder/cost.h"
+#if CONFIG_ANS
+#include "av1/common/ans.h"
+#endif  // CONFIG_ANS
+#include "av1/common/entropy.h"
+
+/* round(-log2(i/256.) * (1 << VP10_PROB_COST_SHIFT))
+   Begins with a bogus entry for simpler addressing. */
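+// e.g. vp10_prob_cost[128] == 512 (exactly one bit) and
+// vp10_prob_cost[64] == 1024 (two bits), in units of 1/512 bit.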
+const uint16_t vp10_prob_cost[256] = {
+  4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
+  2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
+  1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
+  1390, 1371, 1353, 1335, 1318, 1301, 1284, 1268, 1252, 1236, 1221, 1206, 1192,
+  1177, 1163, 1149, 1136, 1123, 1110, 1097, 1084, 1072, 1059, 1047, 1036, 1024,
+  1013, 1001, 990,  979,  968,  958,  947,  937,  927,  917,  907,  897,  887,
+  878,  868,  859,  850,  841,  832,  823,  814,  806,  797,  789,  780,  772,
+  764,  756,  748,  740,  732,  724,  717,  709,  702,  694,  687,  680,  673,
+  665,  658,  651,  644,  637,  631,  624,  617,  611,  604,  598,  591,  585,
+  578,  572,  566,  560,  554,  547,  541,  535,  530,  524,  518,  512,  506,
+  501,  495,  489,  484,  478,  473,  467,  462,  456,  451,  446,  441,  435,
+  430,  425,  420,  415,  410,  405,  400,  395,  390,  385,  380,  375,  371,
+  366,  361,  356,  352,  347,  343,  338,  333,  329,  324,  320,  316,  311,
+  307,  302,  298,  294,  289,  285,  281,  277,  273,  268,  264,  260,  256,
+  252,  248,  244,  240,  236,  232,  228,  224,  220,  216,  212,  209,  205,
+  201,  197,  194,  190,  186,  182,  179,  175,  171,  168,  164,  161,  157,
+  153,  150,  146,  143,  139,  136,  132,  129,  125,  122,  119,  115,  112,
+  109,  105,  102,  99,   95,   92,   89,   86,   82,   79,   76,   73,   70,
+  66,   63,   60,   57,   54,   51,   48,   45,   42,   38,   35,   32,   29,
+  26,   23,   20,   18,   15,   12,   9,    6,    3
+};
+
+#if CONFIG_ANS
+// round(-log2(i/1024.) * (1 << VP10_PROB_COST_SHIFT))
+static const uint16_t vp10_prob_cost10[1024] = {
+  5120, 5120, 4608, 4308, 4096, 3931, 3796, 3683, 3584, 3497, 3419, 3349, 3284,
+  3225, 3171, 3120, 3072, 3027, 2985, 2945, 2907, 2871, 2837, 2804, 2772, 2742,
+  2713, 2685, 2659, 2633, 2608, 2583, 2560, 2537, 2515, 2494, 2473, 2453, 2433,
+  2414, 2395, 2377, 2359, 2342, 2325, 2308, 2292, 2276, 2260, 2245, 2230, 2216,
+  2201, 2187, 2173, 2160, 2147, 2134, 2121, 2108, 2096, 2083, 2071, 2060, 2048,
+  2037, 2025, 2014, 2003, 1992, 1982, 1971, 1961, 1951, 1941, 1931, 1921, 1911,
+  1902, 1892, 1883, 1874, 1865, 1856, 1847, 1838, 1830, 1821, 1813, 1804, 1796,
+  1788, 1780, 1772, 1764, 1756, 1748, 1741, 1733, 1726, 1718, 1711, 1704, 1697,
+  1689, 1682, 1675, 1668, 1661, 1655, 1648, 1641, 1635, 1628, 1622, 1615, 1609,
+  1602, 1596, 1590, 1584, 1578, 1571, 1565, 1559, 1554, 1548, 1542, 1536, 1530,
+  1525, 1519, 1513, 1508, 1502, 1497, 1491, 1486, 1480, 1475, 1470, 1465, 1459,
+  1454, 1449, 1444, 1439, 1434, 1429, 1424, 1419, 1414, 1409, 1404, 1399, 1395,
+  1390, 1385, 1380, 1376, 1371, 1367, 1362, 1357, 1353, 1348, 1344, 1340, 1335,
+  1331, 1326, 1322, 1318, 1313, 1309, 1305, 1301, 1297, 1292, 1288, 1284, 1280,
+  1276, 1272, 1268, 1264, 1260, 1256, 1252, 1248, 1244, 1240, 1236, 1233, 1229,
+  1225, 1221, 1218, 1214, 1210, 1206, 1203, 1199, 1195, 1192, 1188, 1185, 1181,
+  1177, 1174, 1170, 1167, 1163, 1160, 1156, 1153, 1149, 1146, 1143, 1139, 1136,
+  1133, 1129, 1126, 1123, 1119, 1116, 1113, 1110, 1106, 1103, 1100, 1097, 1094,
+  1090, 1087, 1084, 1081, 1078, 1075, 1072, 1069, 1066, 1062, 1059, 1056, 1053,
+  1050, 1047, 1044, 1042, 1039, 1036, 1033, 1030, 1027, 1024, 1021, 1018, 1015,
+  1013, 1010, 1007, 1004, 1001, 998,  996,  993,  990,  987,  985,  982,  979,
+  977,  974,  971,  968,  966,  963,  960,  958,  955,  953,  950,  947,  945,
+  942,  940,  937,  934,  932,  929,  927,  924,  922,  919,  917,  914,  912,
+  909,  907,  904,  902,  899,  897,  895,  892,  890,  887,  885,  883,  880,
+  878,  876,  873,  871,  868,  866,  864,  861,  859,  857,  855,  852,  850,
+  848,  845,  843,  841,  839,  836,  834,  832,  830,  828,  825,  823,  821,
+  819,  817,  814,  812,  810,  808,  806,  804,  801,  799,  797,  795,  793,
+  791,  789,  787,  785,  783,  780,  778,  776,  774,  772,  770,  768,  766,
+  764,  762,  760,  758,  756,  754,  752,  750,  748,  746,  744,  742,  740,
+  738,  736,  734,  732,  730,  728,  726,  724,  723,  721,  719,  717,  715,
+  713,  711,  709,  707,  706,  704,  702,  700,  698,  696,  694,  693,  691,
+  689,  687,  685,  683,  682,  680,  678,  676,  674,  673,  671,  669,  667,
+  665,  664,  662,  660,  658,  657,  655,  653,  651,  650,  648,  646,  644,
+  643,  641,  639,  637,  636,  634,  632,  631,  629,  627,  626,  624,  622,
+  621,  619,  617,  616,  614,  612,  611,  609,  607,  606,  604,  602,  601,
+  599,  598,  596,  594,  593,  591,  590,  588,  586,  585,  583,  582,  580,
+  578,  577,  575,  574,  572,  571,  569,  567,  566,  564,  563,  561,  560,
+  558,  557,  555,  554,  552,  550,  549,  547,  546,  544,  543,  541,  540,
+  538,  537,  535,  534,  532,  531,  530,  528,  527,  525,  524,  522,  521,
+  519,  518,  516,  515,  513,  512,  511,  509,  508,  506,  505,  503,  502,
+  501,  499,  498,  496,  495,  493,  492,  491,  489,  488,  486,  485,  484,
+  482,  481,  480,  478,  477,  475,  474,  473,  471,  470,  469,  467,  466,
+  465,  463,  462,  460,  459,  458,  456,  455,  454,  452,  451,  450,  448,
+  447,  446,  444,  443,  442,  441,  439,  438,  437,  435,  434,  433,  431,
+  430,  429,  428,  426,  425,  424,  422,  421,  420,  419,  417,  416,  415,
+  414,  412,  411,  410,  409,  407,  406,  405,  404,  402,  401,  400,  399,
+  397,  396,  395,  394,  392,  391,  390,  389,  387,  386,  385,  384,  383,
+  381,  380,  379,  378,  377,  375,  374,  373,  372,  371,  369,  368,  367,
+  366,  365,  364,  362,  361,  360,  359,  358,  356,  355,  354,  353,  352,
+  351,  349,  348,  347,  346,  345,  344,  343,  341,  340,  339,  338,  337,
+  336,  335,  333,  332,  331,  330,  329,  328,  327,  326,  324,  323,  322,
+  321,  320,  319,  318,  317,  316,  314,  313,  312,  311,  310,  309,  308,
+  307,  306,  305,  303,  302,  301,  300,  299,  298,  297,  296,  295,  294,
+  293,  292,  291,  289,  288,  287,  286,  285,  284,  283,  282,  281,  280,
+  279,  278,  277,  276,  275,  274,  273,  272,  271,  269,  268,  267,  266,
+  265,  264,  263,  262,  261,  260,  259,  258,  257,  256,  255,  254,  253,
+  252,  251,  250,  249,  248,  247,  246,  245,  244,  243,  242,  241,  240,
+  239,  238,  237,  236,  235,  234,  233,  232,  231,  230,  229,  228,  227,
+  226,  225,  224,  223,  222,  221,  220,  219,  218,  217,  216,  215,  214,
+  213,  212,  212,  211,  210,  209,  208,  207,  206,  205,  204,  203,  202,
+  201,  200,  199,  198,  197,  196,  195,  194,  194,  193,  192,  191,  190,
+  189,  188,  187,  186,  185,  184,  183,  182,  181,  181,  180,  179,  178,
+  177,  176,  175,  174,  173,  172,  171,  170,  170,  169,  168,  167,  166,
+  165,  164,  163,  162,  161,  161,  160,  159,  158,  157,  156,  155,  154,
+  153,  152,  152,  151,  150,  149,  148,  147,  146,  145,  145,  144,  143,
+  142,  141,  140,  139,  138,  138,  137,  136,  135,  134,  133,  132,  132,
+  131,  130,  129,  128,  127,  126,  125,  125,  124,  123,  122,  121,  120,
+  120,  119,  118,  117,  116,  115,  114,  114,  113,  112,  111,  110,  109,
+  109,  108,  107,  106,  105,  104,  104,  103,  102,  101,  100,  99,   99,
+  98,   97,   96,   95,   95,   94,   93,   92,   91,   90,   90,   89,   88,
+  87,   86,   86,   85,   84,   83,   82,   82,   81,   80,   79,   78,   78,
+  77,   76,   75,   74,   74,   73,   72,   71,   70,   70,   69,   68,   67,
+  66,   66,   65,   64,   63,   62,   62,   61,   60,   59,   59,   58,   57,
+  56,   55,   55,   54,   53,   52,   52,   51,   50,   49,   48,   48,   47,
+  46,   45,   45,   44,   43,   42,   42,   41,   40,   39,   38,   38,   37,
+  36,   35,   35,   34,   33,   32,   32,   31,   30,   29,   29,   28,   27,
+  26,   26,   25,   24,   23,   23,   22,   21,   20,   20,   19,   18,   18,
+  17,   16,   15,   15,   14,   13,   12,   12,   11,   10,   9,    9,    8,
+  7,    7,    6,    5,    4,    4,    3,    2,    1,    1
+};
+#endif  // CONFIG_ANS
+
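+// Recursively fills in the cost of every token reachable from tree index i,
+// where c is the accumulated cost of the path so far. Non-positive tree
+// entries are leaves; the token value is the negated entry.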
+static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+                 int c) {
+  const vpx_prob prob = probs[i / 2];
+  int b;
+
+  assert(prob != 0);
+  for (b = 0; b <= 1; ++b) {
+    const int cc = c + vp10_cost_bit(prob, b);
+    const vpx_tree_index ii = tree[i + b];
+
+    if (ii <= 0)
+      costs[-ii] = cc;
+    else
+      cost(costs, tree, probs, ii, cc);
+  }
+}
+
+#if CONFIG_ANS
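+// Computes token costs from an ANS CDF: token i has probability
+// (token_cdf[i + 1] - token_cdf[i]) / 1024, costed via vp10_prob_cost10,
+// while EOB is still coded as a binary decision with tree_probs[0].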
+void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
+                          const rans_dec_lut token_cdf, int skip_eob) {
+  int c_tree = 0;  // Cost of the "tree" nodes EOB and ZERO.
+  int i;
+  costs[EOB_TOKEN] = vp10_cost_bit(tree_probs[0], 0);
+  if (!skip_eob) c_tree = vp10_cost_bit(tree_probs[0], 1);
+  for (i = ZERO_TOKEN; i <= CATEGORY6_TOKEN; ++i) {
+    const int p = token_cdf[i + 1] - token_cdf[i];
+    costs[i] = c_tree + vp10_prob_cost10[p];
+  }
+}
+#endif  // CONFIG_ANS
+
+void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree) {
+  cost(costs, tree, probs, 0, 0);
+}
+
+void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree) {
+  assert(tree[0] <= 0 && tree[1] > 0);
+
+  costs[-tree[0]] = vp10_cost_bit(probs[0], 0);
+  cost(costs, tree, probs, 2, 0);
+}
diff --git a/av1/encoder/cost.h b/av1/encoder/cost.h
new file mode 100644
index 0000000..5ae2a79
--- /dev/null
+++ b/av1/encoder/cost.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_COST_H_
+#define VP10_ENCODER_COST_H_
+
+#include "aom_dsp/prob.h"
+#include "aom/vpx_integer.h"
+#if CONFIG_ANS
+#include "av1/common/ans.h"
+#endif  // CONFIG_ANS
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern const uint16_t vp10_prob_cost[256];
+
+// The shift that converts a cost in whole bits into vp10_prob_cost units
+// (1/512 of a bit).
+#define VP10_PROB_COST_SHIFT 9
+
+#define vp10_cost_zero(prob) (vp10_prob_cost[prob])
+
+#define vp10_cost_one(prob) vp10_cost_zero(256 - (prob))
+
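+// e.g. vp10_cost_bit(128, 0) == vp10_cost_bit(128, 1) == 512: a bit with a
+// 50% probability costs exactly one bit in these units.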
+#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) : (prob))
+
+// Cost of coding an n bit literal, using 128 (i.e. 50%) probability
+// for each bit.
+#define vp10_cost_literal(n) ((n) * (1 << VP10_PROB_COST_SHIFT))
+
+static INLINE unsigned int cost_branch256(const unsigned int ct[2],
+                                          vpx_prob p) {
+  return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
+}
+
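+// Cost of coding the len low-order bits of 'bits' along the given tree,
+// consuming the bits from most- to least-significant.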
+static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+                             int len) {
+  int cost = 0;
+  vpx_tree_index i = 0;
+
+  do {
+    const int bit = (bits >> --len) & 1;
+    cost += vp10_cost_bit(probs[i >> 1], bit);
+    i = tree[i + bit];
+  } while (len);
+
+  return cost;
+}
+
+void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree);
+void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree);
+
+#if CONFIG_ANS
+void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
+                          const rans_dec_lut token_cdf, int skip_eob);
+#endif
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_COST_H_
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
new file mode 100644
index 0000000..8f7812e
--- /dev/null
+++ b/av1/encoder/dct.c
@@ -0,0 +1,1896 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "av1/common/blockd.h"
+#include "av1/common/idct.h"
+#include "aom_dsp/fwd_txfm.h"
+#include "aom_ports/mem.h"
+
+static INLINE void range_check(const tran_low_t *input, const int size,
+                               const int bit) {
+#if 0  // CONFIG_COEFFICIENT_RANGE_CHECKING
+// TODO(angiebird): the range_check is not used because the bit range
+// in fdct# is not correct. Since we are going to merge in a new version
+// of fdct# from nextgenv2, we won't fix the incorrect bit range now.
+  int i;
+  for (i = 0; i < size; ++i) {
+    assert(abs(input[i]) < (1 << bit));
+  }
+#else
+  (void)input;
+  (void)size;
+  (void)bit;
+#endif
+}
+
+static void fdct4(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t temp;
+  tran_low_t step[4];
+
+  // stage 0
+  range_check(input, 4, 14);
+
+  // stage 1
+  output[0] = input[0] + input[3];
+  output[1] = input[1] + input[2];
+  output[2] = input[1] - input[2];
+  output[3] = input[0] - input[3];
+
+  range_check(output, 4, 15);
+
+  // stage 2
+  temp = output[0] * cospi_16_64 + output[1] * cospi_16_64;
+  step[0] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[1] * -cospi_16_64 + output[0] * cospi_16_64;
+  step[1] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[2] * cospi_24_64 + output[3] * cospi_8_64;
+  step[2] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[3] * cospi_24_64 + output[2] * -cospi_8_64;
+  step[3] = (tran_low_t)fdct_round_shift(temp);
+
+  range_check(step, 4, 16);
+
+  // stage 3
+  output[0] = step[0];
+  output[1] = step[2];
+  output[2] = step[1];
+  output[3] = step[3];
+
+  range_check(output, 4, 16);
+}
+
+static void fdct8(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t temp;
+  tran_low_t step[8];
+
+  // stage 0
+  range_check(input, 8, 13);
+
+  // stage 1
+  output[0] = input[0] + input[7];
+  output[1] = input[1] + input[6];
+  output[2] = input[2] + input[5];
+  output[3] = input[3] + input[4];
+  output[4] = input[3] - input[4];
+  output[5] = input[2] - input[5];
+  output[6] = input[1] - input[6];
+  output[7] = input[0] - input[7];
+
+  range_check(output, 8, 14);
+
+  // stage 2
+  step[0] = output[0] + output[3];
+  step[1] = output[1] + output[2];
+  step[2] = output[1] - output[2];
+  step[3] = output[0] - output[3];
+  step[4] = output[4];
+  temp = output[5] * -cospi_16_64 + output[6] * cospi_16_64;
+  step[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[6] * cospi_16_64 + output[5] * cospi_16_64;
+  step[6] = (tran_low_t)fdct_round_shift(temp);
+  step[7] = output[7];
+
+  range_check(step, 8, 15);
+
+  // stage 3
+  temp = step[0] * cospi_16_64 + step[1] * cospi_16_64;
+  output[0] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[1] * -cospi_16_64 + step[0] * cospi_16_64;
+  output[1] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+  output[2] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[3] * cospi_24_64 + step[2] * -cospi_8_64;
+  output[3] = (tran_low_t)fdct_round_shift(temp);
+  output[4] = step[4] + step[5];
+  output[5] = step[4] - step[5];
+  output[6] = step[7] - step[6];
+  output[7] = step[7] + step[6];
+
+  range_check(output, 8, 16);
+
+  // stage 4
+  step[0] = output[0];
+  step[1] = output[1];
+  step[2] = output[2];
+  step[3] = output[3];
+  temp = output[4] * cospi_28_64 + output[7] * cospi_4_64;
+  step[4] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[5] * cospi_12_64 + output[6] * cospi_20_64;
+  step[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[6] * cospi_12_64 + output[5] * -cospi_20_64;
+  step[6] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[7] * cospi_28_64 + output[4] * -cospi_4_64;
+  step[7] = (tran_low_t)fdct_round_shift(temp);
+
+  range_check(step, 8, 16);
+
+  // stage 5
+  output[0] = step[0];
+  output[1] = step[4];
+  output[2] = step[2];
+  output[3] = step[6];
+  output[4] = step[1];
+  output[5] = step[5];
+  output[6] = step[3];
+  output[7] = step[7];
+
+  range_check(output, 8, 16);
+}
+
+static void fdct16(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t temp;
+  tran_low_t step[16];
+
+  // stage 0
+  range_check(input, 16, 13);
+
+  // stage 1
+  output[0] = input[0] + input[15];
+  output[1] = input[1] + input[14];
+  output[2] = input[2] + input[13];
+  output[3] = input[3] + input[12];
+  output[4] = input[4] + input[11];
+  output[5] = input[5] + input[10];
+  output[6] = input[6] + input[9];
+  output[7] = input[7] + input[8];
+  output[8] = input[7] - input[8];
+  output[9] = input[6] - input[9];
+  output[10] = input[5] - input[10];
+  output[11] = input[4] - input[11];
+  output[12] = input[3] - input[12];
+  output[13] = input[2] - input[13];
+  output[14] = input[1] - input[14];
+  output[15] = input[0] - input[15];
+
+  range_check(output, 16, 14);
+
+  // stage 2
+  step[0] = output[0] + output[7];
+  step[1] = output[1] + output[6];
+  step[2] = output[2] + output[5];
+  step[3] = output[3] + output[4];
+  step[4] = output[3] - output[4];
+  step[5] = output[2] - output[5];
+  step[6] = output[1] - output[6];
+  step[7] = output[0] - output[7];
+  step[8] = output[8];
+  step[9] = output[9];
+  temp = output[10] * -cospi_16_64 + output[13] * cospi_16_64;
+  step[10] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[11] * -cospi_16_64 + output[12] * cospi_16_64;
+  step[11] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[12] * cospi_16_64 + output[11] * cospi_16_64;
+  step[12] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[13] * cospi_16_64 + output[10] * cospi_16_64;
+  step[13] = (tran_low_t)fdct_round_shift(temp);
+  step[14] = output[14];
+  step[15] = output[15];
+
+  range_check(step, 16, 15);
+
+  // stage 3
+  output[0] = step[0] + step[3];
+  output[1] = step[1] + step[2];
+  output[2] = step[1] - step[2];
+  output[3] = step[0] - step[3];
+  output[4] = step[4];
+  temp = step[5] * -cospi_16_64 + step[6] * cospi_16_64;
+  output[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[6] * cospi_16_64 + step[5] * cospi_16_64;
+  output[6] = (tran_low_t)fdct_round_shift(temp);
+  output[7] = step[7];
+  output[8] = step[8] + step[11];
+  output[9] = step[9] + step[10];
+  output[10] = step[9] - step[10];
+  output[11] = step[8] - step[11];
+  output[12] = step[15] - step[12];
+  output[13] = step[14] - step[13];
+  output[14] = step[14] + step[13];
+  output[15] = step[15] + step[12];
+
+  range_check(output, 16, 16);
+
+  // stage 4
+  temp = output[0] * cospi_16_64 + output[1] * cospi_16_64;
+  step[0] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[1] * -cospi_16_64 + output[0] * cospi_16_64;
+  step[1] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[2] * cospi_24_64 + output[3] * cospi_8_64;
+  step[2] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[3] * cospi_24_64 + output[2] * -cospi_8_64;
+  step[3] = (tran_low_t)fdct_round_shift(temp);
+  step[4] = output[4] + output[5];
+  step[5] = output[4] - output[5];
+  step[6] = output[7] - output[6];
+  step[7] = output[7] + output[6];
+  step[8] = output[8];
+  temp = output[9] * -cospi_8_64 + output[14] * cospi_24_64;
+  step[9] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[10] * -cospi_24_64 + output[13] * -cospi_8_64;
+  step[10] = (tran_low_t)fdct_round_shift(temp);
+  step[11] = output[11];
+  step[12] = output[12];
+  temp = output[13] * cospi_24_64 + output[10] * -cospi_8_64;
+  step[13] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[14] * cospi_8_64 + output[9] * cospi_24_64;
+  step[14] = (tran_low_t)fdct_round_shift(temp);
+  step[15] = output[15];
+
+  range_check(step, 16, 16);
+
+  // stage 5
+  output[0] = step[0];
+  output[1] = step[1];
+  output[2] = step[2];
+  output[3] = step[3];
+  temp = step[4] * cospi_28_64 + step[7] * cospi_4_64;
+  output[4] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[5] * cospi_12_64 + step[6] * cospi_20_64;
+  output[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[6] * cospi_12_64 + step[5] * -cospi_20_64;
+  output[6] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[7] * cospi_28_64 + step[4] * -cospi_4_64;
+  output[7] = (tran_low_t)fdct_round_shift(temp);
+  output[8] = step[8] + step[9];
+  output[9] = step[8] - step[9];
+  output[10] = step[11] - step[10];
+  output[11] = step[11] + step[10];
+  output[12] = step[12] + step[13];
+  output[13] = step[12] - step[13];
+  output[14] = step[15] - step[14];
+  output[15] = step[15] + step[14];
+
+  range_check(output, 16, 16);
+
+  // stage 6
+  step[0] = output[0];
+  step[1] = output[1];
+  step[2] = output[2];
+  step[3] = output[3];
+  step[4] = output[4];
+  step[5] = output[5];
+  step[6] = output[6];
+  step[7] = output[7];
+  temp = output[8] * cospi_30_64 + output[15] * cospi_2_64;
+  step[8] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[9] * cospi_14_64 + output[14] * cospi_18_64;
+  step[9] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[10] * cospi_22_64 + output[13] * cospi_10_64;
+  step[10] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[11] * cospi_6_64 + output[12] * cospi_26_64;
+  step[11] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[12] * cospi_6_64 + output[11] * -cospi_26_64;
+  step[12] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[13] * cospi_22_64 + output[10] * -cospi_10_64;
+  step[13] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[14] * cospi_14_64 + output[9] * -cospi_18_64;
+  step[14] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[15] * cospi_30_64 + output[8] * -cospi_2_64;
+  step[15] = (tran_low_t)fdct_round_shift(temp);
+
+  range_check(step, 16, 16);
+
+  // stage 7
+  output[0] = step[0];
+  output[1] = step[8];
+  output[2] = step[4];
+  output[3] = step[12];
+  output[4] = step[2];
+  output[5] = step[10];
+  output[6] = step[6];
+  output[7] = step[14];
+  output[8] = step[1];
+  output[9] = step[9];
+  output[10] = step[5];
+  output[11] = step[13];
+  output[12] = step[3];
+  output[13] = step[11];
+  output[14] = step[7];
+  output[15] = step[15];
+
+  range_check(output, 16, 16);
+}
+
+#if CONFIG_EXT_TX
+static void fdct32(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t temp;
+  tran_low_t step[32];
+
+  // stage 0
+  range_check(input, 32, 14);
+
+  // stage 1
+  output[0] = input[0] + input[31];
+  output[1] = input[1] + input[30];
+  output[2] = input[2] + input[29];
+  output[3] = input[3] + input[28];
+  output[4] = input[4] + input[27];
+  output[5] = input[5] + input[26];
+  output[6] = input[6] + input[25];
+  output[7] = input[7] + input[24];
+  output[8] = input[8] + input[23];
+  output[9] = input[9] + input[22];
+  output[10] = input[10] + input[21];
+  output[11] = input[11] + input[20];
+  output[12] = input[12] + input[19];
+  output[13] = input[13] + input[18];
+  output[14] = input[14] + input[17];
+  output[15] = input[15] + input[16];
+  output[16] = input[15] - input[16];
+  output[17] = input[14] - input[17];
+  output[18] = input[13] - input[18];
+  output[19] = input[12] - input[19];
+  output[20] = input[11] - input[20];
+  output[21] = input[10] - input[21];
+  output[22] = input[9] - input[22];
+  output[23] = input[8] - input[23];
+  output[24] = input[7] - input[24];
+  output[25] = input[6] - input[25];
+  output[26] = input[5] - input[26];
+  output[27] = input[4] - input[27];
+  output[28] = input[3] - input[28];
+  output[29] = input[2] - input[29];
+  output[30] = input[1] - input[30];
+  output[31] = input[0] - input[31];
+
+  range_check(output, 32, 15);
+
+  // stage 2
+  step[0] = output[0] + output[15];
+  step[1] = output[1] + output[14];
+  step[2] = output[2] + output[13];
+  step[3] = output[3] + output[12];
+  step[4] = output[4] + output[11];
+  step[5] = output[5] + output[10];
+  step[6] = output[6] + output[9];
+  step[7] = output[7] + output[8];
+  step[8] = output[7] - output[8];
+  step[9] = output[6] - output[9];
+  step[10] = output[5] - output[10];
+  step[11] = output[4] - output[11];
+  step[12] = output[3] - output[12];
+  step[13] = output[2] - output[13];
+  step[14] = output[1] - output[14];
+  step[15] = output[0] - output[15];
+  step[16] = output[16];
+  step[17] = output[17];
+  step[18] = output[18];
+  step[19] = output[19];
+  temp = output[20] * -cospi_16_64 + output[27] * cospi_16_64;
+  step[20] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[21] * -cospi_16_64 + output[26] * cospi_16_64;
+  step[21] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[22] * -cospi_16_64 + output[25] * cospi_16_64;
+  step[22] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[23] * -cospi_16_64 + output[24] * cospi_16_64;
+  step[23] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[24] * cospi_16_64 + output[23] * cospi_16_64;
+  step[24] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[25] * cospi_16_64 + output[22] * cospi_16_64;
+  step[25] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[26] * cospi_16_64 + output[21] * cospi_16_64;
+  step[26] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[27] * cospi_16_64 + output[20] * cospi_16_64;
+  step[27] = (tran_low_t)fdct_round_shift(temp);
+  step[28] = output[28];
+  step[29] = output[29];
+  step[30] = output[30];
+  step[31] = output[31];
+
+  range_check(step, 32, 16);
+
+  // stage 3
+  output[0] = step[0] + step[7];
+  output[1] = step[1] + step[6];
+  output[2] = step[2] + step[5];
+  output[3] = step[3] + step[4];
+  output[4] = step[3] - step[4];
+  output[5] = step[2] - step[5];
+  output[6] = step[1] - step[6];
+  output[7] = step[0] - step[7];
+  output[8] = step[8];
+  output[9] = step[9];
+  temp = step[10] * -cospi_16_64 + step[13] * cospi_16_64;
+  output[10] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[11] * -cospi_16_64 + step[12] * cospi_16_64;
+  output[11] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[12] * cospi_16_64 + step[11] * cospi_16_64;
+  output[12] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[13] * cospi_16_64 + step[10] * cospi_16_64;
+  output[13] = (tran_low_t)fdct_round_shift(temp);
+  output[14] = step[14];
+  output[15] = step[15];
+  output[16] = step[16] + step[23];
+  output[17] = step[17] + step[22];
+  output[18] = step[18] + step[21];
+  output[19] = step[19] + step[20];
+  output[20] = step[19] - step[20];
+  output[21] = step[18] - step[21];
+  output[22] = step[17] - step[22];
+  output[23] = step[16] - step[23];
+  output[24] = step[31] - step[24];
+  output[25] = step[30] - step[25];
+  output[26] = step[29] - step[26];
+  output[27] = step[28] - step[27];
+  output[28] = step[28] + step[27];
+  output[29] = step[29] + step[26];
+  output[30] = step[30] + step[25];
+  output[31] = step[31] + step[24];
+
+  range_check(output, 32, 17);
+
+  // stage 4
+  step[0] = output[0] + output[3];
+  step[1] = output[1] + output[2];
+  step[2] = output[1] - output[2];
+  step[3] = output[0] - output[3];
+  step[4] = output[4];
+  temp = output[5] * -cospi_16_64 + output[6] * cospi_16_64;
+  step[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[6] * cospi_16_64 + output[5] * cospi_16_64;
+  step[6] = (tran_low_t)fdct_round_shift(temp);
+  step[7] = output[7];
+  step[8] = output[8] + output[11];
+  step[9] = output[9] + output[10];
+  step[10] = output[9] - output[10];
+  step[11] = output[8] - output[11];
+  step[12] = output[15] - output[12];
+  step[13] = output[14] - output[13];
+  step[14] = output[14] + output[13];
+  step[15] = output[15] + output[12];
+  step[16] = output[16];
+  step[17] = output[17];
+  temp = output[18] * -cospi_8_64 + output[29] * cospi_24_64;
+  step[18] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[19] * -cospi_8_64 + output[28] * cospi_24_64;
+  step[19] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[20] * -cospi_24_64 + output[27] * -cospi_8_64;
+  step[20] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[21] * -cospi_24_64 + output[26] * -cospi_8_64;
+  step[21] = (tran_low_t)fdct_round_shift(temp);
+  step[22] = output[22];
+  step[23] = output[23];
+  step[24] = output[24];
+  step[25] = output[25];
+  temp = output[26] * cospi_24_64 + output[21] * -cospi_8_64;
+  step[26] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[27] * cospi_24_64 + output[20] * -cospi_8_64;
+  step[27] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[28] * cospi_8_64 + output[19] * cospi_24_64;
+  step[28] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[29] * cospi_8_64 + output[18] * cospi_24_64;
+  step[29] = (tran_low_t)fdct_round_shift(temp);
+  step[30] = output[30];
+  step[31] = output[31];
+
+  range_check(step, 32, 18);
+
+  // stage 5
+  temp = step[0] * cospi_16_64 + step[1] * cospi_16_64;
+  output[0] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[1] * -cospi_16_64 + step[0] * cospi_16_64;
+  output[1] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[2] * cospi_24_64 + step[3] * cospi_8_64;
+  output[2] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[3] * cospi_24_64 + step[2] * -cospi_8_64;
+  output[3] = (tran_low_t)fdct_round_shift(temp);
+  output[4] = step[4] + step[5];
+  output[5] = step[4] - step[5];
+  output[6] = step[7] - step[6];
+  output[7] = step[7] + step[6];
+  output[8] = step[8];
+  temp = step[9] * -cospi_8_64 + step[14] * cospi_24_64;
+  output[9] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[10] * -cospi_24_64 + step[13] * -cospi_8_64;
+  output[10] = (tran_low_t)fdct_round_shift(temp);
+  output[11] = step[11];
+  output[12] = step[12];
+  temp = step[13] * cospi_24_64 + step[10] * -cospi_8_64;
+  output[13] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[14] * cospi_8_64 + step[9] * cospi_24_64;
+  output[14] = (tran_low_t)fdct_round_shift(temp);
+  output[15] = step[15];
+  output[16] = step[16] + step[19];
+  output[17] = step[17] + step[18];
+  output[18] = step[17] - step[18];
+  output[19] = step[16] - step[19];
+  output[20] = step[23] - step[20];
+  output[21] = step[22] - step[21];
+  output[22] = step[22] + step[21];
+  output[23] = step[23] + step[20];
+  output[24] = step[24] + step[27];
+  output[25] = step[25] + step[26];
+  output[26] = step[25] - step[26];
+  output[27] = step[24] - step[27];
+  output[28] = step[31] - step[28];
+  output[29] = step[30] - step[29];
+  output[30] = step[30] + step[29];
+  output[31] = step[31] + step[28];
+
+  range_check(output, 32, 18);
+
+  // stage 6
+  step[0] = output[0];
+  step[1] = output[1];
+  step[2] = output[2];
+  step[3] = output[3];
+  temp = output[4] * cospi_28_64 + output[7] * cospi_4_64;
+  step[4] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[5] * cospi_12_64 + output[6] * cospi_20_64;
+  step[5] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[6] * cospi_12_64 + output[5] * -cospi_20_64;
+  step[6] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[7] * cospi_28_64 + output[4] * -cospi_4_64;
+  step[7] = (tran_low_t)fdct_round_shift(temp);
+  step[8] = output[8] + output[9];
+  step[9] = output[8] - output[9];
+  step[10] = output[11] - output[10];
+  step[11] = output[11] + output[10];
+  step[12] = output[12] + output[13];
+  step[13] = output[12] - output[13];
+  step[14] = output[15] - output[14];
+  step[15] = output[15] + output[14];
+  step[16] = output[16];
+  temp = output[17] * -cospi_4_64 + output[30] * cospi_28_64;
+  step[17] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[18] * -cospi_28_64 + output[29] * -cospi_4_64;
+  step[18] = (tran_low_t)fdct_round_shift(temp);
+  step[19] = output[19];
+  step[20] = output[20];
+  temp = output[21] * -cospi_20_64 + output[26] * cospi_12_64;
+  step[21] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[22] * -cospi_12_64 + output[25] * -cospi_20_64;
+  step[22] = (tran_low_t)fdct_round_shift(temp);
+  step[23] = output[23];
+  step[24] = output[24];
+  temp = output[25] * cospi_12_64 + output[22] * -cospi_20_64;
+  step[25] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[26] * cospi_20_64 + output[21] * cospi_12_64;
+  step[26] = (tran_low_t)fdct_round_shift(temp);
+  step[27] = output[27];
+  step[28] = output[28];
+  temp = output[29] * cospi_28_64 + output[18] * -cospi_4_64;
+  step[29] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[30] * cospi_4_64 + output[17] * cospi_28_64;
+  step[30] = (tran_low_t)fdct_round_shift(temp);
+  step[31] = output[31];
+
+  range_check(step, 32, 18);
+
+  // stage 7
+  output[0] = step[0];
+  output[1] = step[1];
+  output[2] = step[2];
+  output[3] = step[3];
+  output[4] = step[4];
+  output[5] = step[5];
+  output[6] = step[6];
+  output[7] = step[7];
+  temp = step[8] * cospi_30_64 + step[15] * cospi_2_64;
+  output[8] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[9] * cospi_14_64 + step[14] * cospi_18_64;
+  output[9] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[10] * cospi_22_64 + step[13] * cospi_10_64;
+  output[10] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[11] * cospi_6_64 + step[12] * cospi_26_64;
+  output[11] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[12] * cospi_6_64 + step[11] * -cospi_26_64;
+  output[12] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[13] * cospi_22_64 + step[10] * -cospi_10_64;
+  output[13] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[14] * cospi_14_64 + step[9] * -cospi_18_64;
+  output[14] = (tran_low_t)fdct_round_shift(temp);
+  temp = step[15] * cospi_30_64 + step[8] * -cospi_2_64;
+  output[15] = (tran_low_t)fdct_round_shift(temp);
+  output[16] = step[16] + step[17];
+  output[17] = step[16] - step[17];
+  output[18] = step[19] - step[18];
+  output[19] = step[19] + step[18];
+  output[20] = step[20] + step[21];
+  output[21] = step[20] - step[21];
+  output[22] = step[23] - step[22];
+  output[23] = step[23] + step[22];
+  output[24] = step[24] + step[25];
+  output[25] = step[24] - step[25];
+  output[26] = step[27] - step[26];
+  output[27] = step[27] + step[26];
+  output[28] = step[28] + step[29];
+  output[29] = step[28] - step[29];
+  output[30] = step[31] - step[30];
+  output[31] = step[31] + step[30];
+
+  range_check(output, 32, 18);
+
+  // stage 8
+  step[0] = output[0];
+  step[1] = output[1];
+  step[2] = output[2];
+  step[3] = output[3];
+  step[4] = output[4];
+  step[5] = output[5];
+  step[6] = output[6];
+  step[7] = output[7];
+  step[8] = output[8];
+  step[9] = output[9];
+  step[10] = output[10];
+  step[11] = output[11];
+  step[12] = output[12];
+  step[13] = output[13];
+  step[14] = output[14];
+  step[15] = output[15];
+  temp = output[16] * cospi_31_64 + output[31] * cospi_1_64;
+  step[16] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[17] * cospi_15_64 + output[30] * cospi_17_64;
+  step[17] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[18] * cospi_23_64 + output[29] * cospi_9_64;
+  step[18] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[19] * cospi_7_64 + output[28] * cospi_25_64;
+  step[19] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[20] * cospi_27_64 + output[27] * cospi_5_64;
+  step[20] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[21] * cospi_11_64 + output[26] * cospi_21_64;
+  step[21] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[22] * cospi_19_64 + output[25] * cospi_13_64;
+  step[22] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[23] * cospi_3_64 + output[24] * cospi_29_64;
+  step[23] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[24] * cospi_3_64 + output[23] * -cospi_29_64;
+  step[24] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[25] * cospi_19_64 + output[22] * -cospi_13_64;
+  step[25] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[26] * cospi_11_64 + output[21] * -cospi_21_64;
+  step[26] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[27] * cospi_27_64 + output[20] * -cospi_5_64;
+  step[27] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[28] * cospi_7_64 + output[19] * -cospi_25_64;
+  step[28] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[29] * cospi_23_64 + output[18] * -cospi_9_64;
+  step[29] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[30] * cospi_15_64 + output[17] * -cospi_17_64;
+  step[30] = (tran_low_t)fdct_round_shift(temp);
+  temp = output[31] * cospi_31_64 + output[16] * -cospi_1_64;
+  step[31] = (tran_low_t)fdct_round_shift(temp);
+
+  range_check(step, 32, 18);
+
+  // stage 9
+  output[0] = step[0];
+  output[1] = step[16];
+  output[2] = step[8];
+  output[3] = step[24];
+  output[4] = step[4];
+  output[5] = step[20];
+  output[6] = step[12];
+  output[7] = step[28];
+  output[8] = step[2];
+  output[9] = step[18];
+  output[10] = step[10];
+  output[11] = step[26];
+  output[12] = step[6];
+  output[13] = step[22];
+  output[14] = step[14];
+  output[15] = step[30];
+  output[16] = step[1];
+  output[17] = step[17];
+  output[18] = step[9];
+  output[19] = step[25];
+  output[20] = step[5];
+  output[21] = step[21];
+  output[22] = step[13];
+  output[23] = step[29];
+  output[24] = step[3];
+  output[25] = step[19];
+  output[26] = step[11];
+  output[27] = step[27];
+  output[28] = step[7];
+  output[29] = step[23];
+  output[30] = step[15];
+  output[31] = step[31];
+
+  range_check(output, 32, 18);
+}
+#endif  // CONFIG_EXT_TX
+
+static void fadst4(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t x0, x1, x2, x3;
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+  x0 = input[0];
+  x1 = input[1];
+  x2 = input[2];
+  x3 = input[3];
+
+  if (!(x0 | x1 | x2 | x3)) {
+    output[0] = output[1] = output[2] = output[3] = 0;
+    return;
+  }
+
+  s0 = sinpi_1_9 * x0;
+  s1 = sinpi_4_9 * x0;
+  s2 = sinpi_2_9 * x1;
+  s3 = sinpi_1_9 * x1;
+  s4 = sinpi_3_9 * x2;
+  s5 = sinpi_4_9 * x3;
+  s6 = sinpi_2_9 * x3;
+  s7 = x0 + x1 - x3;
+
+  x0 = s0 + s2 + s5;
+  x1 = sinpi_3_9 * s7;
+  x2 = s1 - s3 + s6;
+  x3 = s4;
+
+  s0 = x0 + x3;
+  s1 = x1;
+  s2 = x2 - x3;
+  s3 = x2 - x0 + x3;
+
+  // 1-D transform scaling factor is sqrt(2).
+  output[0] = (tran_low_t)fdct_round_shift(s0);
+  output[1] = (tran_low_t)fdct_round_shift(s1);
+  output[2] = (tran_low_t)fdct_round_shift(s2);
+  output[3] = (tran_low_t)fdct_round_shift(s3);
+}
+
+static void fadst8(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
+
+  tran_high_t x0 = input[7];
+  tran_high_t x1 = input[0];
+  tran_high_t x2 = input[5];
+  tran_high_t x3 = input[2];
+  tran_high_t x4 = input[3];
+  tran_high_t x5 = input[4];
+  tran_high_t x6 = input[1];
+  tran_high_t x7 = input[6];
+
+  // stage 1
+  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+  s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
+  s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
+  s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
+  s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
+  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+
+  x0 = fdct_round_shift(s0 + s4);
+  x1 = fdct_round_shift(s1 + s5);
+  x2 = fdct_round_shift(s2 + s6);
+  x3 = fdct_round_shift(s3 + s7);
+  x4 = fdct_round_shift(s0 - s4);
+  x5 = fdct_round_shift(s1 - s5);
+  x6 = fdct_round_shift(s2 - s6);
+  x7 = fdct_round_shift(s3 - s7);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+
+  x0 = s0 + s2;
+  x1 = s1 + s3;
+  x2 = s0 - s2;
+  x3 = s1 - s3;
+  x4 = fdct_round_shift(s4 + s6);
+  x5 = fdct_round_shift(s5 + s7);
+  x6 = fdct_round_shift(s4 - s6);
+  x7 = fdct_round_shift(s5 - s7);
+
+  // stage 3
+  s2 = cospi_16_64 * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (x6 - x7);
+
+  x2 = fdct_round_shift(s2);
+  x3 = fdct_round_shift(s3);
+  x6 = fdct_round_shift(s6);
+  x7 = fdct_round_shift(s7);
+
+  output[0] = (tran_low_t)x0;
+  output[1] = (tran_low_t)-x4;
+  output[2] = (tran_low_t)x6;
+  output[3] = (tran_low_t)-x2;
+  output[4] = (tran_low_t)x3;
+  output[5] = (tran_low_t)-x7;
+  output[6] = (tran_low_t)x5;
+  output[7] = (tran_low_t)-x1;
+}
+
+static void fadst16(const tran_low_t *input, tran_low_t *output) {
+  tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
+  tran_high_t s9, s10, s11, s12, s13, s14, s15;
+
+  tran_high_t x0 = input[15];
+  tran_high_t x1 = input[0];
+  tran_high_t x2 = input[13];
+  tran_high_t x3 = input[2];
+  tran_high_t x4 = input[11];
+  tran_high_t x5 = input[4];
+  tran_high_t x6 = input[9];
+  tran_high_t x7 = input[6];
+  tran_high_t x8 = input[7];
+  tran_high_t x9 = input[8];
+  tran_high_t x10 = input[5];
+  tran_high_t x11 = input[10];
+  tran_high_t x12 = input[3];
+  tran_high_t x13 = input[12];
+  tran_high_t x14 = input[1];
+  tran_high_t x15 = input[14];
+
+  // stage 1
+  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+  s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
+  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+  s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
+  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+  s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
+  s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
+  s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
+  s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
+  s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
+  s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
+  s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
+  s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
+  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+  s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
+  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+
+  x0 = fdct_round_shift(s0 + s8);
+  x1 = fdct_round_shift(s1 + s9);
+  x2 = fdct_round_shift(s2 + s10);
+  x3 = fdct_round_shift(s3 + s11);
+  x4 = fdct_round_shift(s4 + s12);
+  x5 = fdct_round_shift(s5 + s13);
+  x6 = fdct_round_shift(s6 + s14);
+  x7 = fdct_round_shift(s7 + s15);
+  x8 = fdct_round_shift(s0 - s8);
+  x9 = fdct_round_shift(s1 - s9);
+  x10 = fdct_round_shift(s2 - s10);
+  x11 = fdct_round_shift(s3 - s11);
+  x12 = fdct_round_shift(s4 - s12);
+  x13 = fdct_round_shift(s5 - s13);
+  x14 = fdct_round_shift(s6 - s14);
+  x15 = fdct_round_shift(s7 - s15);
+
+  // stage 2
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4;
+  s5 = x5;
+  s6 = x6;
+  s7 = x7;
+  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+
+  x0 = s0 + s4;
+  x1 = s1 + s5;
+  x2 = s2 + s6;
+  x3 = s3 + s7;
+  x4 = s0 - s4;
+  x5 = s1 - s5;
+  x6 = s2 - s6;
+  x7 = s3 - s7;
+  x8 = fdct_round_shift(s8 + s12);
+  x9 = fdct_round_shift(s9 + s13);
+  x10 = fdct_round_shift(s10 + s14);
+  x11 = fdct_round_shift(s11 + s15);
+  x12 = fdct_round_shift(s8 - s12);
+  x13 = fdct_round_shift(s9 - s13);
+  x14 = fdct_round_shift(s10 - s14);
+  x15 = fdct_round_shift(s11 - s15);
+
+  // stage 3
+  s0 = x0;
+  s1 = x1;
+  s2 = x2;
+  s3 = x3;
+  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
+  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+  s8 = x8;
+  s9 = x9;
+  s10 = x10;
+  s11 = x11;
+  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+  s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
+  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+
+  x0 = s0 + s2;
+  x1 = s1 + s3;
+  x2 = s0 - s2;
+  x3 = s1 - s3;
+  x4 = fdct_round_shift(s4 + s6);
+  x5 = fdct_round_shift(s5 + s7);
+  x6 = fdct_round_shift(s4 - s6);
+  x7 = fdct_round_shift(s5 - s7);
+  x8 = s8 + s10;
+  x9 = s9 + s11;
+  x10 = s8 - s10;
+  x11 = s9 - s11;
+  x12 = fdct_round_shift(s12 + s14);
+  x13 = fdct_round_shift(s13 + s15);
+  x14 = fdct_round_shift(s12 - s14);
+  x15 = fdct_round_shift(s13 - s15);
+
+  // stage 4
+  s2 = (-cospi_16_64) * (x2 + x3);
+  s3 = cospi_16_64 * (x2 - x3);
+  s6 = cospi_16_64 * (x6 + x7);
+  s7 = cospi_16_64 * (-x6 + x7);
+  s10 = cospi_16_64 * (x10 + x11);
+  s11 = cospi_16_64 * (-x10 + x11);
+  s14 = (-cospi_16_64) * (x14 + x15);
+  s15 = cospi_16_64 * (x14 - x15);
+
+  x2 = fdct_round_shift(s2);
+  x3 = fdct_round_shift(s3);
+  x6 = fdct_round_shift(s6);
+  x7 = fdct_round_shift(s7);
+  x10 = fdct_round_shift(s10);
+  x11 = fdct_round_shift(s11);
+  x14 = fdct_round_shift(s14);
+  x15 = fdct_round_shift(s15);
+
+  output[0] = (tran_low_t)x0;
+  output[1] = (tran_low_t)-x8;
+  output[2] = (tran_low_t)x12;
+  output[3] = (tran_low_t)-x4;
+  output[4] = (tran_low_t)x6;
+  output[5] = (tran_low_t)x14;
+  output[6] = (tran_low_t)x10;
+  output[7] = (tran_low_t)x2;
+  output[8] = (tran_low_t)x3;
+  output[9] = (tran_low_t)x11;
+  output[10] = (tran_low_t)x15;
+  output[11] = (tran_low_t)x7;
+  output[12] = (tran_low_t)x5;
+  output[13] = (tran_low_t)-x13;
+  output[14] = (tran_low_t)x9;
+  output[15] = (tran_low_t)-x1;
+}
+
+#if CONFIG_EXT_TX
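+// Identity transforms for the IDTX / V_* / H_* types. Each scales its input
+// by sqrt(N / 2) (sqrt(2), 2, 2 * sqrt(2), 4 for N = 4, 8, 16, 32), keeping
+// the gain consistent with the other 1-D transforms of the same length.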
+static void fidtx4(const tran_low_t *input, tran_low_t *output) {
+  int i;
+  for (i = 0; i < 4; ++i)
+    output[i] = (tran_low_t)fdct_round_shift(input[i] * Sqrt2);
+}
+
+static void fidtx8(const tran_low_t *input, tran_low_t *output) {
+  int i;
+  for (i = 0; i < 8; ++i) output[i] = input[i] * 2;
+}
+
+static void fidtx16(const tran_low_t *input, tran_low_t *output) {
+  int i;
+  for (i = 0; i < 16; ++i)
+    output[i] = (tran_low_t)fdct_round_shift(input[i] * 2 * Sqrt2);
+}
+
+static void fidtx32(const tran_low_t *input, tran_low_t *output) {
+  int i;
+  for (i = 0; i < 32; ++i) output[i] = input[i] * 4;
+}
+
+// For use in lieu of ADST
+static void fhalfright32(const tran_low_t *input, tran_low_t *output) {
+  int i;
+  tran_low_t inputhalf[16];
+  for (i = 0; i < 16; ++i) {
+    output[16 + i] = input[i] * 4;
+  }
+  // Multiply the second half of the input by sqrt(2) before the fdct16.
+  for (i = 0; i < 16; ++i) {
+    inputhalf[i] = (tran_low_t)fdct_round_shift(input[i + 16] * Sqrt2);
+  }
+  fdct16(inputhalf, output);
+  // Note: the overall scaling is 4x that of an orthogonal transform.
+}
+
+static void copy_block(const int16_t *src, int src_stride, int l, int w,
+                       int16_t *dest, int dest_stride) {
+  int i;
+  for (i = 0; i < l; ++i) {
+    memcpy(dest + dest_stride * i, src + src_stride * i, w * sizeof(int16_t));
+  }
+}
+
+static void fliplr(int16_t *dest, int stride, int l, int w) {
+  int i, j;
+  for (i = 0; i < l; ++i) {
+    for (j = 0; j < w / 2; ++j) {
+      const int16_t tmp = dest[i * stride + j];
+      dest[i * stride + j] = dest[i * stride + w - 1 - j];
+      dest[i * stride + w - 1 - j] = tmp;
+    }
+  }
+}
+
+static void flipud(int16_t *dest, int stride, int l, int w) {
+  int i, j;
+  for (j = 0; j < w; ++j) {
+    for (i = 0; i < l / 2; ++i) {
+      const int16_t tmp = dest[i * stride + j];
+      dest[i * stride + j] = dest[(l - 1 - i) * stride + j];
+      dest[(l - 1 - i) * stride + j] = tmp;
+    }
+  }
+}
+
+static void fliplrud(int16_t *dest, int stride, int l, int w) {
+  int i, j;
+  for (i = 0; i < l / 2; ++i) {
+    for (j = 0; j < w; ++j) {
+      const int16_t tmp = dest[i * stride + j];
+      dest[i * stride + j] = dest[(l - 1 - i) * stride + w - 1 - j];
+      dest[(l - 1 - i) * stride + w - 1 - j] = tmp;
+    }
+  }
+}
+
+static void copy_fliplr(const int16_t *src, int src_stride, int l, int w,
+                        int16_t *dest, int dest_stride) {
+  copy_block(src, src_stride, l, w, dest, dest_stride);
+  fliplr(dest, dest_stride, l, w);
+}
+
+static void copy_flipud(const int16_t *src, int src_stride, int l, int w,
+                        int16_t *dest, int dest_stride) {
+  copy_block(src, src_stride, l, w, dest, dest_stride);
+  flipud(dest, dest_stride, l, w);
+}
+
+static void copy_fliplrud(const int16_t *src, int src_stride, int l, int w,
+                          int16_t *dest, int dest_stride) {
+  copy_block(src, src_stride, l, w, dest, dest_stride);
+  fliplrud(dest, dest_stride, l, w);
+}
+
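+// The FLIPADST variants reuse the regular ADST kernels on a flipped copy of
+// the input: flipped up/down for a flipped column transform, left/right for
+// a flipped row transform, and both for FLIPADST_FLIPADST.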
+static void maybe_flip_input(const int16_t **src, int *src_stride, int l, int w,
+                             int16_t *buff, int tx_type) {
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case IDTX:
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST: break;
+    case FLIPADST_DCT:
+    case FLIPADST_ADST:
+    case V_FLIPADST:
+      copy_flipud(*src, *src_stride, l, w, buff, w);
+      *src = buff;
+      *src_stride = w;
+      break;
+    case DCT_FLIPADST:
+    case ADST_FLIPADST:
+    case H_FLIPADST:
+      copy_fliplr(*src, *src_stride, l, w, buff, w);
+      *src = buff;
+      *src_stride = w;
+      break;
+    case FLIPADST_FLIPADST:
+      copy_fliplrud(*src, *src_stride, l, w, buff, w);
+      *src = buff;
+      *src_stride = w;
+      break;
+    default: assert(0); break;
+  }
+}
+#endif  // CONFIG_EXT_TX
+
+static const transform_2d FHT_4[] = {
+  { fdct4, fdct4 },    // DCT_DCT
+  { fadst4, fdct4 },   // ADST_DCT
+  { fdct4, fadst4 },   // DCT_ADST
+  { fadst4, fadst4 },  // ADST_ADST
+#if CONFIG_EXT_TX
+  { fadst4, fdct4 },   // FLIPADST_DCT
+  { fdct4, fadst4 },   // DCT_FLIPADST
+  { fadst4, fadst4 },  // FLIPADST_FLIPADST
+  { fadst4, fadst4 },  // ADST_FLIPADST
+  { fadst4, fadst4 },  // FLIPADST_ADST
+  { fidtx4, fidtx4 },  // IDTX
+  { fdct4, fidtx4 },   // V_DCT
+  { fidtx4, fdct4 },   // H_DCT
+  { fadst4, fidtx4 },  // V_ADST
+  { fidtx4, fadst4 },  // H_ADST
+  { fadst4, fidtx4 },  // V_FLIPADST
+  { fidtx4, fadst4 },  // H_FLIPADST
+#endif                 // CONFIG_EXT_TX
+};
+
+static const transform_2d FHT_8[] = {
+  { fdct8, fdct8 },    // DCT_DCT
+  { fadst8, fdct8 },   // ADST_DCT
+  { fdct8, fadst8 },   // DCT_ADST
+  { fadst8, fadst8 },  // ADST_ADST
+#if CONFIG_EXT_TX
+  { fadst8, fdct8 },   // FLIPADST_DCT
+  { fdct8, fadst8 },   // DCT_FLIPADST
+  { fadst8, fadst8 },  // FLIPADST_FLIPADST
+  { fadst8, fadst8 },  // ADST_FLIPADST
+  { fadst8, fadst8 },  // FLIPADST_ADST
+  { fidtx8, fidtx8 },  // IDTX
+  { fdct8, fidtx8 },   // V_DCT
+  { fidtx8, fdct8 },   // H_DCT
+  { fadst8, fidtx8 },  // V_ADST
+  { fidtx8, fadst8 },  // H_ADST
+  { fadst8, fidtx8 },  // V_FLIPADST
+  { fidtx8, fadst8 },  // H_FLIPADST
+#endif                 // CONFIG_EXT_TX
+};
+
+static const transform_2d FHT_16[] = {
+  { fdct16, fdct16 },    // DCT_DCT
+  { fadst16, fdct16 },   // ADST_DCT
+  { fdct16, fadst16 },   // DCT_ADST
+  { fadst16, fadst16 },  // ADST_ADST
+#if CONFIG_EXT_TX
+  { fadst16, fdct16 },   // FLIPADST_DCT
+  { fdct16, fadst16 },   // DCT_FLIPADST
+  { fadst16, fadst16 },  // FLIPADST_FLIPADST
+  { fadst16, fadst16 },  // ADST_FLIPADST
+  { fadst16, fadst16 },  // FLIPADST_ADST
+  { fidtx16, fidtx16 },  // IDTX
+  { fdct16, fidtx16 },   // V_DCT
+  { fidtx16, fdct16 },   // H_DCT
+  { fadst16, fidtx16 },  // V_ADST
+  { fidtx16, fadst16 },  // H_ADST
+  { fadst16, fidtx16 },  // V_FLIPADST
+  { fidtx16, fadst16 },  // H_FLIPADST
+#endif                   // CONFIG_EXT_TX
+};
+
+#if CONFIG_EXT_TX
+static const transform_2d FHT_32[] = {
+  { fdct32, fdct32 },              // DCT_DCT
+  { fhalfright32, fdct32 },        // ADST_DCT
+  { fdct32, fhalfright32 },        // DCT_ADST
+  { fhalfright32, fhalfright32 },  // ADST_ADST
+  { fhalfright32, fdct32 },        // FLIPADST_DCT
+  { fdct32, fhalfright32 },        // DCT_FLIPADST
+  { fhalfright32, fhalfright32 },  // FLIPADST_FLIPADST
+  { fhalfright32, fhalfright32 },  // ADST_FLIPADST
+  { fhalfright32, fhalfright32 },  // FLIPADST_ADST
+  { fidtx32, fidtx32 },            // IDTX
+  { fdct32, fidtx32 },             // V_DCT
+  { fidtx32, fdct32 },             // H_DCT
+  { fhalfright32, fidtx32 },       // V_ADST
+  { fidtx32, fhalfright32 },       // H_ADST
+  { fhalfright32, fidtx32 },       // V_FLIPADST
+  { fidtx32, fhalfright32 },       // H_FLIPADST
+};
+
+static const transform_2d FHT_4x8[] = {
+  { fdct8, fdct4 },    // DCT_DCT
+  { fadst8, fdct4 },   // ADST_DCT
+  { fdct8, fadst4 },   // DCT_ADST
+  { fadst8, fadst4 },  // ADST_ADST
+  { fadst8, fdct4 },   // FLIPADST_DCT
+  { fdct8, fadst4 },   // DCT_FLIPADST
+  { fadst8, fadst4 },  // FLIPADST_FLIPADST
+  { fadst8, fadst4 },  // ADST_FLIPADST
+  { fadst8, fadst4 },  // FLIPADST_ADST
+  { fidtx8, fidtx4 },  // IDTX
+  { fdct8, fidtx4 },   // V_DCT
+  { fidtx8, fdct4 },   // H_DCT
+  { fadst8, fidtx4 },  // V_ADST
+  { fidtx8, fadst4 },  // H_ADST
+  { fadst8, fidtx4 },  // V_FLIPADST
+  { fidtx8, fadst4 },  // H_FLIPADST
+};
+
+static const transform_2d FHT_8x4[] = {
+  { fdct4, fdct8 },    // DCT_DCT
+  { fadst4, fdct8 },   // ADST_DCT
+  { fdct4, fadst8 },   // DCT_ADST
+  { fadst4, fadst8 },  // ADST_ADST
+  { fadst4, fdct8 },   // FLIPADST_DCT
+  { fdct4, fadst8 },   // DCT_FLIPADST
+  { fadst4, fadst8 },  // FLIPADST_FLIPADST
+  { fadst4, fadst8 },  // ADST_FLIPADST
+  { fadst4, fadst8 },  // FLIPADST_ADST
+  { fidtx4, fidtx8 },  // IDTX
+  { fdct4, fidtx8 },   // V_DCT
+  { fidtx4, fdct8 },   // H_DCT
+  { fadst4, fidtx8 },  // V_ADST
+  { fidtx4, fadst8 },  // H_ADST
+  { fadst4, fidtx8 },  // V_FLIPADST
+  { fidtx4, fadst8 },  // H_FLIPADST
+};
+
+static const transform_2d FHT_8x16[] = {
+  { fdct16, fdct8 },    // DCT_DCT
+  { fadst16, fdct8 },   // ADST_DCT
+  { fdct16, fadst8 },   // DCT_ADST
+  { fadst16, fadst8 },  // ADST_ADST
+  { fadst16, fdct8 },   // FLIPADST_DCT
+  { fdct16, fadst8 },   // DCT_FLIPADST
+  { fadst16, fadst8 },  // FLIPADST_FLIPADST
+  { fadst16, fadst8 },  // ADST_FLIPADST
+  { fadst16, fadst8 },  // FLIPADST_ADST
+  { fidtx16, fidtx8 },  // IDTX
+  { fdct16, fidtx8 },   // V_DCT
+  { fidtx16, fdct8 },   // H_DCT
+  { fadst16, fidtx8 },  // V_ADST
+  { fidtx16, fadst8 },  // H_ADST
+  { fadst16, fidtx8 },  // V_FLIPADST
+  { fidtx16, fadst8 },  // H_FLIPADST
+};
+
+static const transform_2d FHT_16x8[] = {
+  { fdct8, fdct16 },    // DCT_DCT
+  { fadst8, fdct16 },   // ADST_DCT
+  { fdct8, fadst16 },   // DCT_ADST
+  { fadst8, fadst16 },  // ADST_ADST
+  { fadst8, fdct16 },   // FLIPADST_DCT
+  { fdct8, fadst16 },   // DCT_FLIPADST
+  { fadst8, fadst16 },  // FLIPADST_FLIPADST
+  { fadst8, fadst16 },  // ADST_FLIPADST
+  { fadst8, fadst16 },  // FLIPADST_ADST
+  { fidtx8, fidtx16 },  // IDTX
+  { fdct8, fidtx16 },   // V_DCT
+  { fidtx8, fdct16 },   // H_DCT
+  { fadst8, fidtx16 },  // V_ADST
+  { fidtx8, fadst16 },  // H_ADST
+  { fadst8, fidtx16 },  // V_FLIPADST
+  { fidtx8, fadst16 },  // H_FLIPADST
+};
+
+static const transform_2d FHT_16x32[] = {
+  { fdct32, fdct16 },         // DCT_DCT
+  { fhalfright32, fdct16 },   // ADST_DCT
+  { fdct32, fadst16 },        // DCT_ADST
+  { fhalfright32, fadst16 },  // ADST_ADST
+  { fhalfright32, fdct16 },   // FLIPADST_DCT
+  { fdct32, fadst16 },        // DCT_FLIPADST
+  { fhalfright32, fadst16 },  // FLIPADST_FLIPADST
+  { fhalfright32, fadst16 },  // ADST_FLIPADST
+  { fhalfright32, fadst16 },  // FLIPADST_ADST
+  { fidtx32, fidtx16 },       // IDTX
+  { fdct32, fidtx16 },        // V_DCT
+  { fidtx32, fdct16 },        // H_DCT
+  { fhalfright32, fidtx16 },  // V_ADST
+  { fidtx32, fadst16 },       // H_ADST
+  { fhalfright32, fidtx16 },  // V_FLIPADST
+  { fidtx32, fadst16 },       // H_FLIPADST
+};
+
+static const transform_2d FHT_32x16[] = {
+  { fdct16, fdct32 },         // DCT_DCT
+  { fadst16, fdct32 },        // ADST_DCT
+  { fdct16, fhalfright32 },   // DCT_ADST
+  { fadst16, fhalfright32 },  // ADST_ADST
+  { fadst16, fdct32 },        // FLIPADST_DCT
+  { fdct16, fhalfright32 },   // DCT_FLIPADST
+  { fadst16, fhalfright32 },  // FLIPADST_FLIPADST
+  { fadst16, fhalfright32 },  // ADST_FLIPADST
+  { fadst16, fhalfright32 },  // FLIPADST_ADST
+  { fidtx16, fidtx32 },       // IDTX
+  { fdct16, fidtx32 },        // V_DCT
+  { fidtx16, fdct32 },        // H_DCT
+  { fadst16, fidtx32 },       // V_ADST
+  { fidtx16, fhalfright32 },  // H_ADST
+  { fadst16, fidtx32 },       // V_FLIPADST
+  { fidtx16, fhalfright32 },  // H_FLIPADST
+};
+#endif  // CONFIG_EXT_TX
+
+void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
+  if (tx_type == DCT_DCT) {
+    vpx_fdct4x4_c(input, output, stride);
+  } else {
+    tran_low_t out[4 * 4];
+    int i, j;
+    tran_low_t temp_in[4], temp_out[4];
+    const transform_2d ht = FHT_4[tx_type];
+
+#if CONFIG_EXT_TX
+    int16_t flipped_input[4 * 4];
+    maybe_flip_input(&input, &stride, 4, 4, flipped_input, tx_type);
+#endif
+
+    // Columns
+    for (i = 0; i < 4; ++i) {
+      for (j = 0; j < 4; ++j) temp_in[j] = input[j * stride + i] * 16;
+      if (i == 0 && temp_in[0]) temp_in[0] += 1;
+      ht.cols(temp_in, temp_out);
+      for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
+    }
+
+    // Rows
+    for (i = 0; i < 4; ++i) {
+      for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
+      ht.rows(temp_in, temp_out);
+      for (j = 0; j < 4; ++j) output[j + i * 4] = (temp_out[j] + 1) >> 2;
+    }
+  }
+}
+
+#if CONFIG_EXT_TX
+void vp10_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
+  const int n = 4;
+  const int n2 = 8;
+  tran_low_t out[8 * 4];
+  tran_low_t temp_in[8], temp_out[8];
+  int i, j;
+  const transform_2d ht = FHT_4x8[tx_type];
+  int16_t flipped_input[8 * 4];
+  maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
+
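+  // The sqrt(2) factor (the fixed-point Sqrt2 constant, normalized by
+  // fdct_round_shift) compensates for the non-square block shape so that the
+  // overall transform gain remains a power of two.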
+  // Columns
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j)
+      temp_in[j] =
+          (tran_low_t)fdct_round_shift(input[j * stride + i] * 8 * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n2; ++j) out[j * n + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j) temp_in[j] = out[j + i * n];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n; ++j) output[j + i * n] = (temp_out[j] + 1) >> 2;
+  }
+  // Note: overall scale factor of transform is 8 times unitary
+}
+
+void vp10_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
+  const int n = 4;
+  const int n2 = 8;
+  tran_low_t out[8 * 4];
+  tran_low_t temp_in[8], temp_out[8];
+  int i, j;
+  const transform_2d ht = FHT_8x4[tx_type];
+  int16_t flipped_input[8 * 4];
+  maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
+
+  // Columns
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j)
+      temp_in[j] =
+          (tran_low_t)fdct_round_shift(input[j * stride + i] * 8 * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n2; ++j) output[j + i * n2] = (temp_out[j] + 1) >> 2;
+  }
+  // Note: overall scale factor of transform is 8 times unitary
+}
+
+void vp10_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
+  const int n = 8;
+  const int n2 = 16;
+  tran_low_t out[16 * 8];
+  tran_low_t temp_in[16], temp_out[16];
+  int i, j;
+  const transform_2d ht = FHT_8x16[tx_type];
+  int16_t flipped_input[16 * 8];
+  maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
+
+  // Columns
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j)
+      temp_in[j] =
+          (tran_low_t)fdct_round_shift(input[j * stride + i] * 4 * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n2; ++j) out[j * n + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j) temp_in[j] = out[j + i * n];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n; ++j) output[j + i * n] = (temp_out[j] + 1) >> 2;
+  }
+  // Note: overall scale factor of transform is 8 times unitary
+}
+
+void vp10_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+                    int tx_type) {
+  const int n = 8;
+  const int n2 = 16;
+  tran_low_t out[16 * 8];
+  tran_low_t temp_in[16], temp_out[16];
+  int i, j;
+  const transform_2d ht = FHT_16x8[tx_type];
+  int16_t flipped_input[16 * 8];
+  maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
+
+  // Columns
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j)
+      temp_in[j] =
+          (tran_low_t)fdct_round_shift(input[j * stride + i] * 4 * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n2; ++j) output[j + i * n2] = (temp_out[j] + 1) >> 2;
+  }
+  // Note: overall scale factor of transform is 8 times unitary
+}
+
+void vp10_fht16x32_c(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  const int n = 16;
+  const int n2 = 32;
+  tran_low_t out[32 * 16];
+  tran_low_t temp_in[32], temp_out[32];
+  int i, j;
+  const transform_2d ht = FHT_16x32[tx_type];
+  int16_t flipped_input[32 * 16];
+  maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
+
+  // Columns
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j)
+      temp_in[j] = (tran_low_t)fdct_round_shift(input[j * stride + i] * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n2; ++j) out[j * n + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j) temp_in[j] = out[j + i * n];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n; ++j)
+      output[j + i * n] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
+  // Note: overall scale factor of transform is 4 times unitary
+}
+
+void vp10_fht32x16_c(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  const int n = 16;
+  const int n2 = 32;
+  tran_low_t out[32 * 16];
+  tran_low_t temp_in[32], temp_out[32];
+  int i, j;
+  const transform_2d ht = FHT_32x16[tx_type];
+  int16_t flipped_input[32 * 16];
+  maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
+
+  // Columns
+  for (i = 0; i < n2; ++i) {
+    for (j = 0; j < n; ++j)
+      temp_in[j] = (tran_low_t)fdct_round_shift(input[j * stride + i] * Sqrt2);
+    ht.cols(temp_in, temp_out);
+    for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
+  }
+
+  // Rows
+  for (i = 0; i < n; ++i) {
+    for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
+    ht.rows(temp_in, temp_out);
+    for (j = 0; j < n2; ++j)
+      output[j + i * n2] =
+          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+  }
+  // Note: overall scale factor of transform is 4 times unitary
+}
+
+#endif  // CONFIG_EXT_TX
+
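+// Fused 8x8 forward DCT and quantization: the residual block is transformed
+// (columns, then rows), the coefficients are quantized in scan order, and the
+// end-of-block position is recorded in *eob_ptr.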
+void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
+                          tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          int skip_block, const int16_t *zbin_ptr,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan, const int16_t *iscan
+#if CONFIG_AOM_QM
+                          ,
+                          const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+#endif
+                          ) {
+  int eob = -1;
+
+  int i, j;
+  tran_low_t intermediate[64];
+
+  // Transform columns
+  {
+    tran_low_t *output = intermediate;
+    tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;  // canbe16
+    tran_high_t t0, t1, t2, t3;                  // needs32
+    tran_high_t x0, x1, x2, x3;                  // canbe16
+
+    int i;
+    for (i = 0; i < 8; i++) {
+      // stage 1
+      s0 = (input[0 * stride] + input[7 * stride]) * 4;
+      s1 = (input[1 * stride] + input[6 * stride]) * 4;
+      s2 = (input[2 * stride] + input[5 * stride]) * 4;
+      s3 = (input[3 * stride] + input[4 * stride]) * 4;
+      s4 = (input[3 * stride] - input[4 * stride]) * 4;
+      s5 = (input[2 * stride] - input[5 * stride]) * 4;
+      s6 = (input[1 * stride] - input[6 * stride]) * 4;
+      s7 = (input[0 * stride] - input[7 * stride]) * 4;
+
+      // fdct4(step, step);
+      x0 = s0 + s3;
+      x1 = s1 + s2;
+      x2 = s1 - s2;
+      x3 = s0 - s3;
+      t0 = (x0 + x1) * cospi_16_64;
+      t1 = (x0 - x1) * cospi_16_64;
+      t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+      t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+      output[0 * 8] = (tran_low_t)fdct_round_shift(t0);
+      output[2 * 8] = (tran_low_t)fdct_round_shift(t2);
+      output[4 * 8] = (tran_low_t)fdct_round_shift(t1);
+      output[6 * 8] = (tran_low_t)fdct_round_shift(t3);
+
+      // stage 2
+      t0 = (s6 - s5) * cospi_16_64;
+      t1 = (s6 + s5) * cospi_16_64;
+      t2 = fdct_round_shift(t0);
+      t3 = fdct_round_shift(t1);
+
+      // stage 3
+      x0 = s4 + t2;
+      x1 = s4 - t2;
+      x2 = s7 - t3;
+      x3 = s7 + t3;
+
+      // stage 4
+      t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+      t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+      t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
+      t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+      output[1 * 8] = (tran_low_t)fdct_round_shift(t0);
+      output[3 * 8] = (tran_low_t)fdct_round_shift(t2);
+      output[5 * 8] = (tran_low_t)fdct_round_shift(t1);
+      output[7 * 8] = (tran_low_t)fdct_round_shift(t3);
+      input++;
+      output++;
+    }
+  }
+
+  // Rows
+  for (i = 0; i < 8; ++i) {
+    fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]);
+    for (j = 0; j < 8; ++j) coeff_ptr[j + i * 8] /= 2;
+  }
+
+  // TODO(jingning): Decide whether these arguments are still needed once the
+  // quantization process is finalized.
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)iscan;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    // Quantization pass: quantize the coefficients in scan order, tracking
+    // the index of the last nonzero quantized coefficient (eob).
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+#if CONFIG_AOM_QM
+      const qm_val_t wt = qm_ptr[rc];
+      const qm_val_t iwt = iqm_ptr[rc];
+      const int dequant =
+          (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >>
+          AOM_QM_BITS;
+#endif
+      const int coeff_sign = (coeff >> 31);
+      const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+
+      int64_t tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
+      int tmp32;
+#if CONFIG_AOM_QM
+      tmp32 = (int)((tmp * quant_ptr[rc != 0] * wt) >> (16 + AOM_QM_BITS));
+      qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant;
+#else
+      tmp32 = (int)((tmp * quant_ptr[rc != 0]) >> 16);
+      qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
+#endif
+
+      if (tmp32) eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                   int tx_type) {
+  if (tx_type == DCT_DCT) {
+    vpx_fdct8x8_c(input, output, stride);
+  } else {
+    tran_low_t out[64];
+    int i, j;
+    tran_low_t temp_in[8], temp_out[8];
+    const transform_2d ht = FHT_8[tx_type];
+
+#if CONFIG_EXT_TX
+    int16_t flipped_input[8 * 8];
+    maybe_flip_input(&input, &stride, 8, 8, flipped_input, tx_type);
+#endif
+
+    // Columns
+    for (i = 0; i < 8; ++i) {
+      for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
+      ht.cols(temp_in, temp_out);
+      for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
+    }
+
+    // Rows
+    for (i = 0; i < 8; ++i) {
+      for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
+      ht.rows(temp_in, temp_out);
+      for (j = 0; j < 8; ++j)
+        output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
+    }
+  }
+}
+
+/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
+   pixel. */
+void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+  int i;
+  tran_high_t a1, b1, c1, d1, e1;
+  const int16_t *ip_pass0 = input;
+  const tran_low_t *ip = NULL;
+  tran_low_t *op = output;
+
+  for (i = 0; i < 4; i++) {
+    a1 = ip_pass0[0 * stride];
+    b1 = ip_pass0[1 * stride];
+    c1 = ip_pass0[2 * stride];
+    d1 = ip_pass0[3 * stride];
+
+    a1 += b1;
+    d1 = d1 - c1;
+    e1 = (a1 - d1) >> 1;
+    b1 = e1 - b1;
+    c1 = e1 - c1;
+    a1 -= c1;
+    d1 += b1;
+    op[0] = (tran_low_t)a1;
+    op[4] = (tran_low_t)c1;
+    op[8] = (tran_low_t)d1;
+    op[12] = (tran_low_t)b1;
+
+    ip_pass0++;
+    op++;
+  }
+  ip = output;
+  op = output;
+
+  for (i = 0; i < 4; i++) {
+    a1 = ip[0];
+    b1 = ip[1];
+    c1 = ip[2];
+    d1 = ip[3];
+
+    a1 += b1;
+    d1 -= c1;
+    e1 = (a1 - d1) >> 1;
+    b1 = e1 - b1;
+    c1 = e1 - c1;
+    a1 -= c1;
+    d1 += b1;
+    op[0] = (tran_low_t)(a1 * UNIT_QUANT_FACTOR);
+    op[1] = (tran_low_t)(c1 * UNIT_QUANT_FACTOR);
+    op[2] = (tran_low_t)(d1 * UNIT_QUANT_FACTOR);
+    op[3] = (tran_low_t)(b1 * UNIT_QUANT_FACTOR);
+
+    ip += 4;
+    op += 4;
+  }
+}
+
+void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  if (tx_type == DCT_DCT) {
+    vpx_fdct16x16_c(input, output, stride);
+  } else {
+    tran_low_t out[256];
+    int i, j;
+    tran_low_t temp_in[16], temp_out[16];
+    const transform_2d ht = FHT_16[tx_type];
+
+#if CONFIG_EXT_TX
+    int16_t flipped_input[16 * 16];
+    maybe_flip_input(&input, &stride, 16, 16, flipped_input, tx_type);
+#endif
+
+    // Columns
+    for (i = 0; i < 16; ++i) {
+      for (j = 0; j < 16; ++j) temp_in[j] = input[j * stride + i] * 4;
+      ht.cols(temp_in, temp_out);
+      for (j = 0; j < 16; ++j)
+        out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+    }
+
+    // Rows
+    for (i = 0; i < 16; ++i) {
+      for (j = 0; j < 16; ++j) temp_in[j] = out[j + i * 16];
+      ht.rows(temp_in, temp_out);
+      for (j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j];
+    }
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
+  vp10_fht4x4_c(input, output, stride, tx_type);
+}
+
+#if CONFIG_EXT_TX
+void vp10_highbd_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
+  vp10_fht4x8_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
+  vp10_fht8x4_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
+                           int tx_type) {
+  vp10_fht8x16_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
+                           int tx_type) {
+  vp10_fht16x8_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fht16x32_c(const int16_t *input, tran_low_t *output,
+                            int stride, int tx_type) {
+  vp10_fht16x32_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fht32x16_c(const int16_t *input, tran_low_t *output,
+                            int stride, int tx_type) {
+  vp10_fht32x16_c(input, output, stride, tx_type);
+}
+#endif  // CONFIG_EXT_TX
+
+void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+                          int tx_type) {
+  vp10_fht8x8_c(input, output, stride, tx_type);
+}
+
+void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
+                           int stride) {
+  vp10_fwht4x4_c(input, output, stride);
+}
+
+void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
+                            int stride, int tx_type) {
+  vp10_fht16x16_c(input, output, stride, tx_type);
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_EXT_TX
+void vp10_fht32x32_c(const int16_t *input, tran_low_t *output, int stride,
+                     int tx_type) {
+  if (tx_type == DCT_DCT) {
+    vpx_fdct32x32_c(input, output, stride);
+  } else {
+    tran_low_t out[1024];
+    int i, j;
+    tran_low_t temp_in[32], temp_out[32];
+    const transform_2d ht = FHT_32[tx_type];
+
+    int16_t flipped_input[32 * 32];
+    maybe_flip_input(&input, &stride, 32, 32, flipped_input, tx_type);
+
+    // Columns
+    for (i = 0; i < 32; ++i) {
+      for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
+      ht.cols(temp_in, temp_out);
+      for (j = 0; j < 32; ++j)
+        out[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
+    }
+
+    // Rows
+    for (i = 0; i < 32; ++i) {
+      for (j = 0; j < 32; ++j) temp_in[j] = out[j + i * 32];
+      ht.rows(temp_in, temp_out);
+      for (j = 0; j < 32; ++j)
+        output[j + i * 32] =
+            (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
+    }
+  }
+}
+
+// Forward identity transform.
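+// The shift scales the residual by 8 for blocks smaller than 32x32 and by 4
+// for 32x32, keeping the output range in line with the other forward
+// transforms above.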
+void vp10_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
+                     int bs, int tx_type) {
+  int r, c;
+  const int shift = bs < 32 ? 3 : 2;
+  if (tx_type == IDTX) {
+    for (r = 0; r < bs; ++r) {
+      for (c = 0; c < bs; ++c) coeff[c] = src_diff[c] << shift;
+      src_diff += stride;
+      coeff += bs;
+    }
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_fht32x32_c(const int16_t *input, tran_low_t *output,
+                            int stride, int tx_type) {
+  vp10_fht32x32_c(input, output, stride, tx_type);
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_EXT_TX
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
new file mode 100644
index 0000000..b2635b4
--- /dev/null
+++ b/av1/encoder/encodeframe.c
@@ -0,0 +1,6109 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_config.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/vpx_timer.h"
+#include "aom_ports/system_state.h"
+
+#include "av1/common/common.h"
+#include "av1/common/entropy.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/idct.h"
+#include "av1/common/mvref_common.h"
+#include "av1/common/pred_common.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/seg_common.h"
+#include "av1/common/tile_common.h"
+
+#include "av1/encoder/aq_complexity.h"
+#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/encoder/aq_variance.h"
+#if CONFIG_SUPERTX
+#include "av1/encoder/cost.h"
+#endif
+#if CONFIG_GLOBAL_MOTION
+#include "av1/encoder/global_motion.h"
+#endif
+#include "av1/encoder/encodeframe.h"
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/ethread.h"
+#include "av1/encoder/extend.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/rdopt.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/tokenize.h"
+
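+// IF_HBD() expands to its arguments only in high bit-depth builds; it is used
+// to pass the extra high bit-depth arguments through shared call sites.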
+#if CONFIG_VP9_HIGHBITDEPTH
+#define IF_HBD(...) __VA_ARGS__
+#else
+#define IF_HBD(...)
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                              int output_enabled, int mi_row, int mi_col,
+                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
+
+#if CONFIG_SUPERTX
+static int check_intra_b(PICK_MODE_CONTEXT *ctx);
+
+static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
+                          int mi_row, int mi_col, BLOCK_SIZE bsize,
+                          PC_TREE *pc_tree);
+static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+#if CONFIG_EXT_INTER
+                               int mi_row_ori, int mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                               int mi_row_pred, int mi_col_pred,
+                               BLOCK_SIZE bsize_pred, int b_sub8x8, int block);
+static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
+                            PC_TREE *pc_tree);
+static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+                               const TileInfo *const tile, int mi_row,
+                               int mi_col, int mi_row_ori, int mi_col_ori,
+                               int output_enabled, BLOCK_SIZE bsize,
+                               BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
+                               int dst_stride[3], PC_TREE *pc_tree);
+static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+                                    const TileInfo *const tile, int mi_row,
+                                    int mi_col, BLOCK_SIZE bsize,
+                                    int output_enabled, PC_TREE *pc_tree);
+static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+                          const TileInfo *const tile, int mi_row, int mi_col,
+                          BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
+                          TX_TYPE *best_tx, PC_TREE *pc_tree);
+#endif  // CONFIG_SUPERTX
+
+// This is used as a reference when computing the source variance for the
+// purposes of activity masking.
+// Eventually this should be replaced by custom no-reference routines,
+// which will be faster.
+static const uint8_t VP10_VAR_OFFS[MAX_SB_SIZE] = {
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+#if CONFIG_EXT_PARTITION
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
+#endif  // CONFIG_EXT_PARTITION
+};
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static const uint16_t VP10_HIGH_VAR_OFFS_8[MAX_SB_SIZE] = {
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+#if CONFIG_EXT_PARTITION
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
+#endif  // CONFIG_EXT_PARTITION
+};
+
+static const uint16_t VP10_HIGH_VAR_OFFS_10[MAX_SB_SIZE] = {
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+#if CONFIG_EXT_PARTITION
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+  128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
+#endif  // CONFIG_EXT_PARTITION
+};
+
+static const uint16_t VP10_HIGH_VAR_OFFS_12[MAX_SB_SIZE] = {
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16,
+#if CONFIG_EXT_PARTITION
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+  128 * 16
+#endif  // CONFIG_EXT_PARTITION
+};
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
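+// Per-pixel variance of the source block, measured against the flat mid-grey
+// (128) reference above.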
+unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+                                            const struct buf_2d *ref,
+                                            BLOCK_SIZE bs) {
+  unsigned int sse;
+  const unsigned int var =
+      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP10_VAR_OFFS, 0, &sse);
+  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+                                                 const struct buf_2d *ref,
+                                                 BLOCK_SIZE bs, int bd) {
+  unsigned int var, sse;
+  switch (bd) {
+    case 10:
+      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                               CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10), 0,
+                               &sse);
+      break;
+    case 12:
+      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                               CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12), 0,
+                               &sse);
+      break;
+    case 8:
+    default:
+      var =
+          cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+                             CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8), 0, &sse);
+      break;
+  }
+  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
+                                                   const struct buf_2d *ref,
+                                                   int mi_row, int mi_col,
+                                                   BLOCK_SIZE bs) {
+  unsigned int sse, var;
+  uint8_t *last_y;
+  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
+
+  assert(last != NULL);
+  last_y =
+      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
+  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
+  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
+}
+
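+// Pick a fixed partition size for the 64x64 block from the per-pixel variance
+// of the source/last-frame difference: the lower the variance, the larger the
+// chosen block size.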
+static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
+                                                   MACROBLOCK *x, int mi_row,
+                                                   int mi_col) {
+  unsigned int var = get_sby_perpixel_diff_variance(
+      cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
+  if (var < 8)
+    return BLOCK_64X64;
+  else if (var < 128)
+    return BLOCK_32X32;
+  else if (var < 2048)
+    return BLOCK_16X16;
+  else
+    return BLOCK_8X8;
+}
+
+// Lighter version of set_offsets that only sets the mode info
+// pointers.
+static void set_mode_info_offsets(VP10_COMP *const cpi, MACROBLOCK *const x,
+                                  MACROBLOCKD *const xd, int mi_row,
+                                  int mi_col) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int idx_str = xd->mi_stride * mi_row + mi_col;
+  xd->mi = cm->mi_grid_visible + idx_str;
+  xd->mi[0] = cm->mi + idx_str;
+  x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
+}
+
+static void set_offsets_without_segment_id(VP10_COMP *cpi,
+                                           const TileInfo *const tile,
+                                           MACROBLOCK *const x, int mi_row,
+                                           int mi_col, BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+  set_skip_context(xd, mi_row, mi_col);
+
+  set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
+
+#if CONFIG_VAR_TX
+  xd->above_txfm_context = cm->above_txfm_context + mi_col;
+  xd->left_txfm_context =
+      xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+  xd->max_tx_size = max_txsize_lookup[bsize];
+#endif
+
+  // Set up destination pointers.
+  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+
+  // Set up limit values for MV components.
+  // MVs beyond this range do not produce a new or different prediction
+  // block.
+  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
+  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
+  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VPX_INTERP_EXTEND;
+  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VPX_INTERP_EXTEND;
+
+  // Set up distance of MB to edge of frame in 1/8th pel units.
+  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
+  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+                 cm->mi_cols);
+
+  // Set up source buffers.
+  vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+
+  // R/D setup.
+  x->rddiv = cpi->rd.RDDIV;
+  x->rdmult = cpi->rd.RDMULT;
+
+  // Required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs().
+  xd->tile = *tile;
+}
+
+static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
+                        MACROBLOCK *const x, int mi_row, int mi_col,
+                        BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi;
+  const struct segmentation *const seg = &cm->seg;
+
+  set_offsets_without_segment_id(cpi, tile, x, mi_row, mi_col, bsize);
+
+  mbmi = &xd->mi[0]->mbmi;
+
+  // Setup segment ID.
+  if (seg->enabled) {
+    if (!cpi->vaq_refresh) {
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+      mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
+    }
+    vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+
+    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
+  } else {
+    mbmi->segment_id = 0;
+    x->encode_breakout = cpi->encode_breakout;
+  }
+
+#if CONFIG_SUPERTX
+  mbmi->segment_id_supertx = MAX_SEGMENTS;
+#endif  // CONFIG_SUPERTX
+}
+
+#if CONFIG_SUPERTX
+static void set_offsets_supertx(VP10_COMP *cpi, ThreadData *td,
+                                const TileInfo *const tile, int mi_row,
+                                int mi_col, BLOCK_SIZE bsize) {
+  MACROBLOCK *const x = &td->mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+  set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
+
+  // Set up distance of MB to edge of frame in 1/8th pel units.
+  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
+  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+                 cm->mi_cols);
+}
+
+static void set_offsets_extend(VP10_COMP *cpi, ThreadData *td,
+                               const TileInfo *const tile, int mi_row_pred,
+                               int mi_col_pred, int mi_row_ori, int mi_col_ori,
+                               BLOCK_SIZE bsize_pred) {
+  // Used in supertx
+  // (mi_row_ori, mi_col_ori, bsize_ori): region for mv
+  // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
+  MACROBLOCK *const x = &td->mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize_pred];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize_pred];
+
+  set_mode_info_offsets(cpi, x, xd, mi_row_ori, mi_col_ori);
+
+  // Set up limit values for MV components.
+  // MVs beyond this range do not produce a new or different prediction
+  // block.
+  x->mv_row_min = -(((mi_row_pred + mi_height) * MI_SIZE) + VPX_INTERP_EXTEND);
+  x->mv_col_min = -(((mi_col_pred + mi_width) * MI_SIZE) + VPX_INTERP_EXTEND);
+  x->mv_row_max = (cm->mi_rows - mi_row_pred) * MI_SIZE + VPX_INTERP_EXTEND;
+  x->mv_col_max = (cm->mi_cols - mi_col_pred) * MI_SIZE + VPX_INTERP_EXTEND;
+
+  // Set up distance of MB to edge of frame in 1/8th pel units.
+  assert(!(mi_col_pred & (mi_width - 1)) && !(mi_row_pred & (mi_height - 1)));
+  set_mi_row_col(xd, tile, mi_row_pred, mi_height, mi_col_pred, mi_width,
+                 cm->mi_rows, cm->mi_cols);
+  xd->up_available = (mi_row_ori > tile->mi_row_start);
+  xd->left_available = (mi_col_ori > tile->mi_col_start);
+
+  // R/D setup.
+  x->rddiv = cpi->rd.RDDIV;
+  x->rdmult = cpi->rd.RDMULT;
+}
+
+static void set_segment_id_supertx(const VP10_COMP *const cpi,
+                                   MACROBLOCK *const x, const int mi_row,
+                                   const int mi_col, const BLOCK_SIZE bsize) {
+  const VP10_COMMON *cm = &cpi->common;
+  const struct segmentation *seg = &cm->seg;
+  const int miw =
+      VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+  const int mih =
+      VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+  const int mi_offset = mi_row * cm->mi_stride + mi_col;
+  MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
+  int r, c;
+  int seg_id_supertx = MAX_SEGMENTS;
+
+  if (!seg->enabled) {
+    seg_id_supertx = 0;
+    x->encode_breakout = cpi->encode_breakout;
+  } else {
+    // Find the minimum segment_id
+    for (r = 0; r < mih; r++)
+      for (c = 0; c < miw; c++)
+        seg_id_supertx =
+            VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+    assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
+
+    // Initialize the plane quantizers.
+    vp10_init_plane_quantizers(cpi, x, seg_id_supertx);
+    x->encode_breakout = cpi->segment_encode_breakout[seg_id_supertx];
+  }
+
+  // Assign the minimum segment_id back to each block's segment_id_supertx.
+  for (r = 0; r < mih; r++)
+    for (c = 0; c < miw; c++)
+      mip[r * cm->mi_stride + c]->mbmi.segment_id_supertx = seg_id_supertx;
+}
+#endif  // CONFIG_SUPERTX
+
+static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+                           MACROBLOCKD *const xd, int mi_row, int mi_col,
+                           BLOCK_SIZE bsize) {
+  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
+    set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
+    xd->mi[0]->mbmi.sb_type = bsize;
+  }
+}
+
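+// Walk the variance tree and assign block sizes: a node keeps its size (or a
+// vert/horz split of it) when the relevant variances are below the threshold
+// for its level; otherwise the four quadrants are visited recursively.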
+static void set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+                                MACROBLOCKD *const xd, VAR_TREE *vt, int mi_row,
+                                int mi_col, const int64_t *const threshold,
+                                const BLOCK_SIZE *const bsize_min) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int hbw = num_8x8_blocks_wide_lookup[vt->bsize] / 2;
+  const int hbh = num_8x8_blocks_high_lookup[vt->bsize] / 2;
+  const int has_cols = mi_col + hbw < cm->mi_cols;
+  const int has_rows = mi_row + hbh < cm->mi_rows;
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  assert(vt->bsize >= BLOCK_8X8);
+
+  assert(hbh == hbw);
+
+  if (vt->bsize == BLOCK_8X8 && cm->frame_type != KEY_FRAME) {
+    set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_8X8);
+    return;
+  }
+
+  if (vt->force_split || (!has_cols && !has_rows)) goto split;
+
+  // For bsize == bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select this
+  // size if the variance is below the threshold; otherwise split is selected.
+  // There is no vert/horiz split check, as there are too few samples for a
+  // variance estimate.
+  if (vt->bsize == bsize_min[0]) {
+    if (has_cols && has_rows && vt->variances.none.variance < threshold[0]) {
+      set_block_size(cpi, x, xd, mi_row, mi_col, vt->bsize);
+      return;
+    } else {
+      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_SPLIT);
+      set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
+      if (vt->bsize > BLOCK_8X8) {
+        set_block_size(cpi, x, xd, mi_row, mi_col + hbw, subsize);
+        set_block_size(cpi, x, xd, mi_row + hbh, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row + hbh, mi_col + hbw, subsize);
+      }
+      return;
+    }
+  } else if (vt->bsize > bsize_min[0]) {
+    // For key frames, take the split for bsize above 32x32, or when the
+    // variance is very high.
+    if (cm->frame_type == KEY_FRAME &&
+        (vt->bsize > BLOCK_32X32 ||
+         vt->variances.none.variance > (threshold[0] << 4))) {
+      goto split;
+    }
+    // If variance is low, take the bsize (no split).
+    if (has_cols && has_rows && vt->variances.none.variance < threshold[0]) {
+      set_block_size(cpi, x, xd, mi_row, mi_col, vt->bsize);
+      return;
+    }
+
+    // Check vertical split.
+    if (has_rows) {
+      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_VERT);
+      if (vt->variances.vert[0].variance < threshold[0] &&
+          vt->variances.vert[1].variance < threshold[0] &&
+          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
+        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row, mi_col + hbw, subsize);
+        return;
+      }
+    }
+    // Check horizontal split.
+    if (has_cols) {
+      BLOCK_SIZE subsize = get_subsize(vt->bsize, PARTITION_HORZ);
+      if (vt->variances.horz[0].variance < threshold[0] &&
+          vt->variances.horz[1].variance < threshold[0] &&
+          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
+        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row + hbh, mi_col, subsize);
+        return;
+      }
+    }
+  }
+
+split : {
+  set_vt_partitioning(cpi, x, xd, vt->split[0], mi_row, mi_col, threshold + 1,
+                      bsize_min + 1);
+  set_vt_partitioning(cpi, x, xd, vt->split[1], mi_row, mi_col + hbw,
+                      threshold + 1, bsize_min + 1);
+  set_vt_partitioning(cpi, x, xd, vt->split[2], mi_row + hbh, mi_col,
+                      threshold + 1, bsize_min + 1);
+  set_vt_partitioning(cpi, x, xd, vt->split[3], mi_row + hbh, mi_col + hbw,
+                      threshold + 1, bsize_min + 1);
+  return;
+}
+}
+
+// Set the variance split thresholds for the following block sizes:
+// 1 - threshold_64x64, 2 - threshold_32x32, 3 - threshold_16x16,
+// 4 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to a 4x4 partition) is
+// currently only used on key frames. Index 0 (the 128x128 level, when
+// present) is set to INT64_MIN so that it always splits.
+static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int is_key_frame = (cm->frame_type == KEY_FRAME);
+  const int threshold_multiplier = is_key_frame ? 20 : 1;
+  const int64_t threshold_base =
+      (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);
+  if (is_key_frame) {
+    thresholds[1] = threshold_base;
+    thresholds[2] = threshold_base >> 2;
+    thresholds[3] = threshold_base >> 2;
+    thresholds[4] = threshold_base << 2;
+  } else {
+    thresholds[2] = threshold_base;
+    if (cm->width <= 352 && cm->height <= 288) {
+      thresholds[1] = threshold_base >> 2;
+      thresholds[3] = threshold_base << 3;
+    } else {
+      thresholds[1] = threshold_base;
+      thresholds[2] = (5 * threshold_base) >> 2;
+      if (cm->width >= 1920 && cm->height >= 1080)
+        thresholds[2] = (7 * threshold_base) >> 2;
+      thresholds[3] = threshold_base << cpi->oxcf.speed;
+    }
+  }
+  thresholds[0] = INT64_MIN;
+}
+
+void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
+  VP10_COMMON *const cm = &cpi->common;
+  SPEED_FEATURES *const sf = &cpi->sf;
+  const int is_key_frame = (cm->frame_type == KEY_FRAME);
+  if (sf->partition_search_type != VAR_BASED_PARTITION &&
+      sf->partition_search_type != REFERENCE_PARTITION) {
+    return;
+  } else {
+    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
+    // The thresholds below are not changed locally.
+    if (is_key_frame) {
+      cpi->vbp_threshold_sad = 0;
+      cpi->vbp_bsize_min = BLOCK_8X8;
+    } else {
+      if (cm->width <= 352 && cm->height <= 288)
+        cpi->vbp_threshold_sad = 100;
+      else
+        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
+                                     ? (cpi->y_dequant[q][1] << 1)
+                                     : 1000;
+      cpi->vbp_bsize_min = BLOCK_16X16;
+    }
+    cpi->vbp_threshold_minmax = 15 + (q >> 3);
+  }
+}
+
+// Compute the minmax over the 8x8 subblocks.
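+// Returns the spread between the largest and smallest of the four 8x8
+// (max - min) source/reference difference ranges; check_split() compares it
+// against a threshold to force a split of visually mixed 16x16 blocks.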
+static int compute_minmax_8x8(const uint8_t *src, int src_stride,
+                              const uint8_t *ref, int ref_stride,
+#if CONFIG_VP9_HIGHBITDEPTH
+                              int highbd,
+#endif
+                              int pixels_wide, int pixels_high) {
+  int k;
+  int minmax_max = 0;
+  int minmax_min = 255;
+  // Loop over the 4 8x8 subblocks.
+  for (k = 0; k < 4; k++) {
+    const int x8_idx = ((k & 1) << 3);
+    const int y8_idx = ((k >> 1) << 3);
+    int min = 0;
+    int max = 0;
+    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
+      const int src_offset = y8_idx * src_stride + x8_idx;
+      const int ref_offset = y8_idx * ref_stride + x8_idx;
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (highbd) {
+        vpx_highbd_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+                              ref_stride, &min, &max);
+      } else {
+        vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset,
+                       ref_stride, &min, &max);
+      }
+#else
+      vpx_minmax_8x8(src + src_offset, src_stride, ref + ref_offset, ref_stride,
+                     &min, &max);
+#endif
+      if ((max - min) > minmax_max) minmax_max = (max - min);
+      if ((max - min) < minmax_min) minmax_min = (max - min);
+    }
+  }
+  return (minmax_max - minmax_min);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE int avg_4x4(const uint8_t *const src, const int stride,
+                          const int highbd) {
+  if (highbd) {
+    return vpx_highbd_avg_4x4(src, stride);
+  } else {
+    return vpx_avg_4x4(src, stride);
+  }
+}
+#else
+static INLINE int avg_4x4(const uint8_t *const src, const int stride) {
+  return vpx_avg_4x4(src, stride);
+}
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE int avg_8x8(const uint8_t *const src, const int stride,
+                          const int highbd) {
+  if (highbd) {
+    return vpx_highbd_avg_8x8(src, stride);
+  } else {
+    return vpx_avg_8x8(src, stride);
+  }
+}
+#else
+static INLINE int avg_8x8(const uint8_t *const src, const int stride) {
+  return vpx_avg_8x8(src, stride);
+}
+#endif
+
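+// Recursively set up a variance tree node: record the block geometry and the
+// source/reference pointers, then initialize the four children on the
+// corresponding quadrants (width/height are clipped for partial blocks at the
+// frame edge).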
+static void init_variance_tree(VAR_TREE *const vt,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               const int highbd,
+#endif
+                               BLOCK_SIZE bsize, BLOCK_SIZE leaf_size,
+                               const int width, const int height,
+                               const uint8_t *const src, const int src_stride,
+                               const uint8_t *const ref, const int ref_stride) {
+  assert(bsize >= leaf_size);
+
+  vt->bsize = bsize;
+
+  vt->force_split = 0;
+
+  vt->src = src;
+  vt->src_stride = src_stride;
+  vt->ref = ref;
+  vt->ref_stride = ref_stride;
+
+  vt->width = width;
+  vt->height = height;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  vt->highbd = highbd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  if (bsize > leaf_size) {
+    const BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
+    const int px = num_4x4_blocks_wide_lookup[subsize] * 4;
+
+    init_variance_tree(vt->split[0],
+#if CONFIG_VP9_HIGHBITDEPTH
+                       highbd,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                       subsize, leaf_size, VPXMIN(px, width),
+                       VPXMIN(px, height), src, src_stride, ref, ref_stride);
+    init_variance_tree(vt->split[1],
+#if CONFIG_VP9_HIGHBITDEPTH
+                       highbd,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                       subsize, leaf_size, width - px, VPXMIN(px, height),
+                       src + px, src_stride, ref + px, ref_stride);
+    init_variance_tree(vt->split[2],
+#if CONFIG_VP9_HIGHBITDEPTH
+                       highbd,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                       subsize, leaf_size, VPXMIN(px, width), height - px,
+                       src + px * src_stride, src_stride, ref + px * ref_stride,
+                       ref_stride);
+    init_variance_tree(vt->split[3],
+#if CONFIG_VP9_HIGHBITDEPTH
+                       highbd,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                       subsize, leaf_size, width - px, height - px,
+                       src + px * src_stride + px, src_stride,
+                       ref + px * ref_stride + px, ref_stride);
+  }
+}
+
+// Fill the variance tree by averaging pixel values (sub-sampling) at the
+// leaf node size, then propagating the results upwards.
+static void fill_variance_tree(VAR_TREE *const vt, const BLOCK_SIZE leaf_size) {
+  if (vt->bsize > leaf_size) {
+    fill_variance_tree(vt->split[0], leaf_size);
+    fill_variance_tree(vt->split[1], leaf_size);
+    fill_variance_tree(vt->split[2], leaf_size);
+    fill_variance_tree(vt->split[3], leaf_size);
+    fill_variance_node(vt);
+  } else if (vt->width <= 0 || vt->height <= 0) {
+    fill_variance(0, 0, 0, &vt->variances.none);
+  } else {
+    unsigned int sse = 0;
+    int sum = 0;
+    int src_avg;
+    int ref_avg;
+    assert(leaf_size == BLOCK_4X4 || leaf_size == BLOCK_8X8);
+    if (leaf_size == BLOCK_4X4) {
+      src_avg = avg_4x4(vt->src, vt->src_stride IF_HBD(, vt->highbd));
+      ref_avg = avg_4x4(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
+    } else {
+      src_avg = avg_8x8(vt->src, vt->src_stride IF_HBD(, vt->highbd));
+      ref_avg = avg_8x8(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
+    }
+    sum = src_avg - ref_avg;
+    sse = sum * sum;
+    fill_variance(sse, sum, 0, &vt->variances.none);
+  }
+}
+
+static void refine_variance_tree(VAR_TREE *const vt, const int64_t threshold) {
+  if (vt->bsize >= BLOCK_8X8) {
+    if (vt->bsize == BLOCK_16X16) {
+      if (vt->variances.none.variance <= threshold)
+        return;
+      else
+        vt->force_split = 0;
+    }
+
+    refine_variance_tree(vt->split[0], threshold);
+    refine_variance_tree(vt->split[1], threshold);
+    refine_variance_tree(vt->split[2], threshold);
+    refine_variance_tree(vt->split[3], threshold);
+
+    if (vt->bsize <= BLOCK_16X16) fill_variance_node(vt);
+  } else if (vt->width <= 0 || vt->height <= 0) {
+    fill_variance(0, 0, 0, &vt->variances.none);
+  } else {
+    const int src_avg = avg_4x4(vt->src, vt->src_stride IF_HBD(, vt->highbd));
+    const int ref_avg = avg_4x4(vt->ref, vt->ref_stride IF_HBD(, vt->highbd));
+    const int sum = src_avg - ref_avg;
+    const unsigned int sse = sum * sum;
+    assert(vt->bsize == BLOCK_4X4);
+    fill_variance(sse, sum, 0, &vt->variances.none);
+  }
+}
+
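+// Key frames: force a split wherever a 32x32 block's variance exceeds the
+// threshold, propagating the flag up through the parent nodes.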
+static int check_split_key_frame(VAR_TREE *const vt, const int64_t threshold) {
+  if (vt->bsize == BLOCK_32X32) {
+    vt->force_split = vt->variances.none.variance > threshold;
+  } else {
+    vt->force_split |= check_split_key_frame(vt->split[0], threshold);
+    vt->force_split |= check_split_key_frame(vt->split[1], threshold);
+    vt->force_split |= check_split_key_frame(vt->split[2], threshold);
+    vt->force_split |= check_split_key_frame(vt->split[3], threshold);
+  }
+  return vt->force_split;
+}
+
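+// Decide force_split bottom-up: a 16x16 block is split on high variance, or
+// on a large 8x8 minmax spread when its variance is nominal; a 32x32 block is
+// additionally split on its own variance once its children have been checked.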
+static int check_split(VP10_COMP *const cpi, VAR_TREE *const vt,
+                       const int segment_id, const int64_t *const thresholds) {
+  if (vt->bsize == BLOCK_16X16) {
+    vt->force_split = vt->variances.none.variance > thresholds[0];
+    if (!vt->force_split && vt->variances.none.variance > thresholds[-1] &&
+        !cyclic_refresh_segment_id_boosted(segment_id)) {
+      // There is some nominal amount of 16x16 variance (based on the
+      // average), so compute the minmax over the 8x8 sub-blocks and, if it is
+      // above the threshold, force a split to 8x8 for this 16x16 block.
+      int minmax =
+          compute_minmax_8x8(vt->src, vt->src_stride, vt->ref, vt->ref_stride,
+#if CONFIG_VP9_HIGHBITDEPTH
+                             vt->highbd,
+#endif
+                             vt->width, vt->height);
+      vt->force_split = minmax > cpi->vbp_threshold_minmax;
+    }
+  } else {
+    vt->force_split |=
+        check_split(cpi, vt->split[0], segment_id, thresholds + 1);
+    vt->force_split |=
+        check_split(cpi, vt->split[1], segment_id, thresholds + 1);
+    vt->force_split |=
+        check_split(cpi, vt->split[2], segment_id, thresholds + 1);
+    vt->force_split |=
+        check_split(cpi, vt->split[3], segment_id, thresholds + 1);
+
+    if (vt->bsize == BLOCK_32X32 && !vt->force_split) {
+      vt->force_split = vt->variances.none.variance > thresholds[0];
+    }
+  }
+
+  return vt->force_split;
+}
+
+// This function chooses the partitioning based on the variance between the
+// source and the reconstructed last (or golden) frame, where the variance is
+// computed on down-sampled inputs.
+static void choose_partitioning(VP10_COMP *const cpi, ThreadData *const td,
+                                const TileInfo *const tile, MACROBLOCK *const x,
+                                const int mi_row, const int mi_col) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  VAR_TREE *const vt = td->var_root[cm->mib_size_log2 - MIN_MIB_SIZE_LOG2];
+  int i;
+  const uint8_t *src;
+  const uint8_t *ref;
+  int src_stride;
+  int ref_stride;
+  int pixels_wide = 8 * num_8x8_blocks_wide_lookup[cm->sb_size];
+  int pixels_high = 8 * num_8x8_blocks_high_lookup[cm->sb_size];
+  int64_t thresholds[5] = {
+    cpi->vbp_thresholds[0], cpi->vbp_thresholds[1], cpi->vbp_thresholds[2],
+    cpi->vbp_thresholds[3], cpi->vbp_thresholds[4],
+  };
+  BLOCK_SIZE bsize_min[5] = { BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
+                              cpi->vbp_bsize_min, BLOCK_8X8 };
+  const int start_level = cm->sb_size == BLOCK_64X64 ? 1 : 0;
+  const int64_t *const thre = thresholds + start_level;
+  const BLOCK_SIZE *const bmin = bsize_min + start_level;
+
+  const int is_key_frame = (cm->frame_type == KEY_FRAME);
+  const int low_res = (cm->width <= 352 && cm->height <= 288);
+
+  int segment_id = CR_SEGMENT_ID_BASE;
+
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
+    const uint8_t *const map =
+        cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+    segment_id = get_segment_id(cm, map, cm->sb_size, mi_row, mi_col);
+
+    if (cyclic_refresh_segment_id_boosted(segment_id)) {
+      int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+      set_vbp_thresholds(cpi, thresholds, q);
+    }
+  }
+
+  set_offsets(cpi, tile, x, mi_row, mi_col, cm->sb_size);
+
+  if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
+  if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
+
+  src = x->plane[0].src.buf;
+  src_stride = x->plane[0].src.stride;
+
+  if (!is_key_frame) {
+    MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+    unsigned int uv_sad;
+    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
+    const YV12_BUFFER_CONFIG *yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
+    unsigned int y_sad, y_sad_g;
+
+    const int hbs = cm->mib_size / 2;
+    const int split_vert = mi_col + hbs >= cm->mi_cols;
+    const int split_horz = mi_row + hbs >= cm->mi_rows;
+    BLOCK_SIZE bsize;
+
+    if (split_vert && split_horz)
+      bsize = get_subsize(cm->sb_size, PARTITION_SPLIT);
+    else if (split_vert)
+      bsize = get_subsize(cm->sb_size, PARTITION_VERT);
+    else if (split_horz)
+      bsize = get_subsize(cm->sb_size, PARTITION_HORZ);
+    else
+      bsize = cm->sb_size;
+
+    assert(yv12 != NULL);
+
+    if (yv12_g && yv12_g != yv12) {
+      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+      y_sad_g = cpi->fn_ptr[bsize].sdf(
+          x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
+          xd->plane[0].pre[0].stride);
+    } else {
+      y_sad_g = UINT_MAX;
+    }
+
+    vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
+                          &cm->frame_refs[LAST_FRAME - 1].sf);
+    mbmi->ref_frame[0] = LAST_FRAME;
+    mbmi->ref_frame[1] = NONE;
+    mbmi->sb_type = cm->sb_size;
+    mbmi->mv[0].as_int = 0;
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = BILINEAR;
+#else
+    mbmi->interp_filter = BILINEAR;
+#endif
+
+    y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
+
+    if (y_sad_g < y_sad) {
+      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+                            &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+      mbmi->ref_frame[0] = GOLDEN_FRAME;
+      mbmi->mv[0].as_int = 0;
+      y_sad = y_sad_g;
+    } else {
+      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
+    }
+
+    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, cm->sb_size);
+
+    for (i = 1; i < MAX_MB_PLANE; ++i) {
+      struct macroblock_plane *p = &x->plane[i];
+      struct macroblockd_plane *pd = &xd->plane[i];
+      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+
+      if (bs == BLOCK_INVALID)
+        uv_sad = UINT_MAX;
+      else
+        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
+                                     pd->dst.stride);
+
+      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
+    }
+
+    ref = xd->plane[0].dst.buf;
+    ref_stride = xd->plane[0].dst.stride;
+
+    // If the y_sad is very small, take the largest partition and exit.
+    // Don't check on a boosted segment for now, as the largest partition is
+    // suppressed there.
+    if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
+      if (!split_vert && !split_horz) {
+        set_block_size(cpi, x, xd, mi_row, mi_col, cm->sb_size);
+        return;
+      }
+    }
+  } else {
+    ref = VP10_VAR_OFFS;
+    ref_stride = 0;
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      switch (xd->bd) {
+        case 10: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_10); break;
+        case 12: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_12); break;
+        case 8:
+        default: ref = CONVERT_TO_BYTEPTR(VP10_HIGH_VAR_OFFS_8); break;
+      }
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  }
+
+  init_variance_tree(
+      vt,
+#if CONFIG_VP9_HIGHBITDEPTH
+      xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      cm->sb_size, (is_key_frame || low_res) ? BLOCK_4X4 : BLOCK_8X8,
+      pixels_wide, pixels_high, src, src_stride, ref, ref_stride);
+
+  // Fill in the entire tree of variances and compute splits.
+  if (is_key_frame) {
+    fill_variance_tree(vt, BLOCK_4X4);
+    check_split_key_frame(vt, thre[1]);
+  } else {
+    fill_variance_tree(vt, BLOCK_8X8);
+    check_split(cpi, vt, segment_id, thre);
+    if (low_res) {
+      refine_variance_tree(vt, thre[1] << 1);
+    }
+  }
+
+  vt->force_split |= mi_col + cm->mib_size > cm->mi_cols ||
+                     mi_row + cm->mib_size > cm->mi_rows;
+
+  // Now go through the entire structure, splitting every block size until
+  // we reach one whose variance is lower than our threshold.
+  set_vt_partitioning(cpi, x, xd, vt, mi_row, mi_col, thre, bmin);
+}
+
+#if CONFIG_DUAL_FILTER
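+// If a block codes no subpel motion in a given filter direction, reset that
+// direction's interp filter to the default so the stored value is
+// deterministic, and mirror it into the second reference's slot.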
+static void reset_intmv_filter_type(VP10_COMMON *cm, MACROBLOCKD *xd,
+                                    MB_MODE_INFO *mbmi) {
+  int dir;
+  for (dir = 0; dir < 2; ++dir) {
+    if (!has_subpel_mv_component(xd->mi[0], xd, dir) &&
+        (mbmi->ref_frame[1] == NONE ||
+         !has_subpel_mv_component(xd->mi[0], xd, dir + 2)))
+      mbmi->interp_filter[dir] = (cm->interp_filter == SWITCHABLE)
+                                     ? EIGHTTAP_REGULAR
+                                     : cm->interp_filter;
+    mbmi->interp_filter[dir + 2] = mbmi->interp_filter[dir];
+  }
+}
+
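+// Count switchable interp filter usage, but only for directions that have a
+// subpel motion component on at least one reference.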
+static void update_filter_type_count(FRAME_COUNTS *counts,
+                                     const MACROBLOCKD *xd,
+                                     const MB_MODE_INFO *mbmi) {
+  int dir;
+  for (dir = 0; dir < 2; ++dir) {
+    if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
+        (mbmi->ref_frame[1] > INTRA_FRAME &&
+         has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
+      const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+      ++counts->switchable_interp[ctx][mbmi->interp_filter[dir]];
+    }
+  }
+}
+#endif
+#if CONFIG_GLOBAL_MOTION
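+// Count how often ZEROMV (the global-motion mode) is used per reference
+// frame.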
+static void update_global_motion_used(PREDICTION_MODE mode,
+                                      const MB_MODE_INFO *mbmi,
+                                      VP10_COMP *cpi) {
+  if (mode == ZEROMV) {
+    ++cpi->global_motion_used[mbmi->ref_frame[0]];
+    if (has_second_ref(mbmi)) ++cpi->global_motion_used[mbmi->ref_frame[1]];
+  }
+}
+#endif  // CONFIG_GLOBAL_MOTION
+
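+// Commit the coding decisions picked for a block: copy the mode info from
+// the pick-mode context into the frame-level structures, update the segment
+// id, switch the coefficient buffers, and accumulate entropy counts and MV
+// references when output is enabled.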
+static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
+                         int mi_row, int mi_col, BLOCK_SIZE bsize,
+                         int output_enabled) {
+  int i, x_idx, y;
+  VP10_COMMON *const cm = &cpi->common;
+  RD_COUNTS *const rdc = &td->rd_counts;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblock_plane *const p = x->plane;
+  struct macroblockd_plane *const pd = xd->plane;
+  MODE_INFO *mi = &ctx->mic;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  MODE_INFO *mi_addr = xd->mi[0];
+  const struct segmentation *const seg = &cm->seg;
+  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
+  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
+  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
+  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+  MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+  int w, h;
+
+  const int mis = cm->mi_stride;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+  int max_plane;
+
+#if CONFIG_REF_MV
+  int8_t rf_type;
+#endif
+
+#if !CONFIG_SUPERTX
+  assert(mi->mbmi.sb_type == bsize);
+#endif
+
+  *mi_addr = *mi;
+  *x->mbmi_ext = ctx->mbmi_ext;
+
+#if CONFIG_DUAL_FILTER
+  reset_intmv_filter_type(cm, xd, mbmi);
+#endif
+
+#if CONFIG_REF_MV
+  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
+      mbmi->mode == NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+      int_mv this_mv =
+          (i == 0)
+              ? x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].this_mv
+              : x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].comp_mv;
+      clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+      x->mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0] = this_mv;
+      mbmi->pred_mv[i] = this_mv;
+    }
+  }
+#endif
+
+  // If segmentation is in use.
+  if (seg->enabled) {
+    // For in-frame complexity AQ, copy the segment id from the segment map.
+    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+      mi_addr->mbmi.segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
+    }
+    // Else, for cyclic refresh mode, update the segment map, set the
+    // segment id, and then update the quantizer.
+    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
+      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+                                         bsize, ctx->rate, ctx->dist, x->skip);
+    }
+  }
+
+  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
+  for (i = 0; i < max_plane; ++i) {
+    p[i].coeff = ctx->coeff[i][1];
+    p[i].qcoeff = ctx->qcoeff[i][1];
+    pd[i].dqcoeff = ctx->dqcoeff[i][1];
+    p[i].eobs = ctx->eobs[i][1];
+  }
+
+  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
+    p[i].coeff = ctx->coeff[i][2];
+    p[i].qcoeff = ctx->qcoeff[i][2];
+    pd[i].dqcoeff = ctx->dqcoeff[i][2];
+    p[i].eobs = ctx->eobs[i][2];
+  }
+
+  for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
+
+  // Restore the coding context of the MB to that which was in place
+  // when the mode was picked for it.
+  for (y = 0; y < mi_height; y++)
+    for (x_idx = 0; x_idx < mi_width; x_idx++)
+      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+          (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+        xd->mi[x_idx + y * mis] = mi_addr;
+      }
+
+  if (cpi->oxcf.aq_mode)
+    vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+
+  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
+    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
+    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
+  }
+
+  x->skip = ctx->skip;
+
+#if CONFIG_VAR_TX
+  for (i = 0; i < 1; ++i)
+    memcpy(x->blk_skip[i], ctx->blk_skip[i],
+           sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+
+  if (!output_enabled) return;
+
+#if CONFIG_INTERNAL_STATS
+  if (frame_is_intra_only(cm)) {
+    static const int kf_mode_index[] = {
+      THR_DC /*DC_PRED*/,          THR_V_PRED /*V_PRED*/,
+      THR_H_PRED /*H_PRED*/,       THR_D45_PRED /*D45_PRED*/,
+      THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+      THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+      THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
+    };
+    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
+  } else {
+    // Note how often each mode is chosen as best.
+    ++cpi->mode_chosen_counts[ctx->best_mode_index];
+  }
+#endif
+  if (!frame_is_intra_only(cm)) {
+    if (is_inter_block(mbmi)) {
+      vp10_update_mv_count(td);
+#if CONFIG_GLOBAL_MOTION
+      if (bsize >= BLOCK_8X8) {
+        update_global_motion_used(mbmi->mode, mbmi, cpi);
+      } else {
+        const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+        const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+        int idx, idy;
+        for (idy = 0; idy < 2; idy += num_4x4_h) {
+          for (idx = 0; idx < 2; idx += num_4x4_w) {
+            const int j = idy * 2 + idx;
+            update_global_motion_used(mi->bmi[j].as_mode, mbmi, cpi);
+          }
+        }
+      }
+#endif  // CONFIG_GLOBAL_MOTION
+      if (cm->interp_filter == SWITCHABLE
+#if CONFIG_EXT_INTERP
+          && vp10_is_interp_needed(xd)
+#endif
+              ) {
+#if CONFIG_DUAL_FILTER
+        update_filter_type_count(td->counts, xd, mbmi);
+#else
+        const int ctx = vp10_get_pred_context_switchable_interp(xd);
+        ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
+#endif
+      }
+    }
+
+    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
+    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
+    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
+  }
+
+  for (h = 0; h < y_mis; ++h) {
+    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
+    for (w = 0; w < x_mis; ++w) {
+      MV_REF *const mv = frame_mv + w;
+      mv->ref_frame[0] = mi->mbmi.ref_frame[0];
+      mv->ref_frame[1] = mi->mbmi.ref_frame[1];
+      mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
+      mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
+    }
+  }
+}
+
+#if CONFIG_SUPERTX
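+// Supertx counterpart of update_state(): commit the picked mode for a block
+// that is coded inside a supertx superblock.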
+static void update_state_supertx(VP10_COMP *cpi, ThreadData *td,
+                                 PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
+                                 BLOCK_SIZE bsize, int output_enabled) {
+  int y, x_idx;
+#if CONFIG_VAR_TX || CONFIG_REF_MV
+  int i;
+#endif
+  VP10_COMMON *const cm = &cpi->common;
+  RD_COUNTS *const rdc = &td->rd_counts;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *mi = &ctx->mic;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  MODE_INFO *mi_addr = xd->mi[0];
+  const struct segmentation *const seg = &cm->seg;
+  const int mis = cm->mi_stride;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+  const int x_mis = VPXMIN(mi_width, cm->mi_cols - mi_col);
+  const int y_mis = VPXMIN(mi_height, cm->mi_rows - mi_row);
+  MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+  int w, h;
+
+#if CONFIG_REF_MV
+  int8_t rf_type;
+#endif
+
+  *mi_addr = *mi;
+  *x->mbmi_ext = ctx->mbmi_ext;
+  assert(is_inter_block(mbmi));
+  assert(mbmi->tx_size == ctx->mic.mbmi.tx_size);
+
+#if CONFIG_DUAL_FILTER
+  reset_intmv_filter_type(cm, xd, mbmi);
+#endif
+
+#if CONFIG_REF_MV
+  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && mbmi->sb_type >= BLOCK_8X8 &&
+      mbmi->mode == NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+      int_mv this_mv =
+          (i == 0)
+              ? x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].this_mv
+              : x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].comp_mv;
+      clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+      lower_mv_precision(&this_mv.as_mv, cm->allow_high_precision_mv);
+      x->mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0] = this_mv;
+      mbmi->pred_mv[i] = this_mv;
+    }
+  }
+#endif
+
+  // If segmentation is in use.
+  if (seg->enabled) {
+    if (cpi->vaq_refresh) {
+      const int energy = bsize <= BLOCK_16X16
+                             ? x->mb_energy
+                             : vp10_block_energy(cpi, x, bsize);
+      mi_addr->mbmi.segment_id = vp10_vaq_segment_id(energy);
+    } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
+      // For cyclic refresh mode, now update the segment map
+      // and set the segment id.
+      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+                                         bsize, ctx->rate, ctx->dist, 1);
+    } else {
+      // Otherwise just set the segment id based on the current segment map
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+      mi_addr->mbmi.segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
+    }
+    mi_addr->mbmi.segment_id_supertx = MAX_SEGMENTS;
+  }
+
+  // Restore the coding context of the MB to that which was in place
+  // when the mode was picked for it.
+  for (y = 0; y < mi_height; y++)
+    for (x_idx = 0; x_idx < mi_width; x_idx++)
+      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+          (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+        xd->mi[x_idx + y * mis] = mi_addr;
+      }
+
+  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
+    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
+    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
+  }
+
+  x->skip = ctx->skip;
+
+#if CONFIG_VAR_TX
+  for (i = 0; i < 1; ++i)
+    memcpy(x->blk_skip[i], ctx->blk_skip[i],
+           sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif  // CONFIG_VAR_TX
+
+#if CONFIG_VAR_TX
+  {
+    const TX_SIZE mtx = mbmi->tx_size;
+    int idy, idx;
+    for (idy = 0; idy < (1 << mtx) / 2; ++idy)
+      for (idx = 0; idx < (1 << mtx) / 2; ++idx)
+        mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
+  }
+#endif  // CONFIG_VAR_TX
+  // Turn motion variation off for supertx
+  mbmi->motion_variation = SIMPLE_TRANSLATION;
+
+  if (!output_enabled) return;
+
+  if (!frame_is_intra_only(cm)) {
+    vp10_update_mv_count(td);
+
+    if (cm->interp_filter == SWITCHABLE
+#if CONFIG_EXT_INTERP
+        && vp10_is_interp_needed(xd)
+#endif
+            ) {
+#if CONFIG_DUAL_FILTER
+      update_filter_type_count(td->counts, xd, mbmi);
+#else
+      const int ctx = vp10_get_pred_context_switchable_interp(xd);
+      ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
+#endif
+    }
+
+    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
+    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
+    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
+  }
+
+  for (h = 0; h < y_mis; ++h) {
+    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
+    for (w = 0; w < x_mis; ++w) {
+      MV_REF *const mv = frame_mv + w;
+      mv->ref_frame[0] = mi->mbmi.ref_frame[0];
+      mv->ref_frame[1] = mi->mbmi.ref_frame[1];
+      mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
+      mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
+    }
+  }
+}
+
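+// Walk the partition tree of a superblock and commit supertx state for every
+// leaf, then point the coefficient buffers at the matching supertx context.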
+static void update_state_sb_supertx(VP10_COMP *cpi, ThreadData *td,
+                                    const TileInfo *const tile, int mi_row,
+                                    int mi_col, BLOCK_SIZE bsize,
+                                    int output_enabled, PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblock_plane *const p = x->plane;
+  struct macroblockd_plane *const pd = xd->plane;
+  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+  PARTITION_TYPE partition = pc_tree->partitioning;
+  BLOCK_SIZE subsize = get_subsize(bsize, partition);
+  int i;
+#if CONFIG_EXT_PARTITION_TYPES
+  BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
+#endif
+  PICK_MODE_CONTEXT *pmc = NULL;
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
+    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+
+  switch (partition) {
+    case PARTITION_NONE:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->none, mi_row, mi_col, subsize,
+                           output_enabled);
+      break;
+    case PARTITION_VERT:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->vertical[0], mi_row, mi_col,
+                           subsize, output_enabled);
+      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+        set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize);
+        update_state_supertx(cpi, td, &pc_tree->vertical[1], mi_row,
+                             mi_col + hbs, subsize, output_enabled);
+      }
+      pmc = &pc_tree->vertical_supertx;
+      break;
+    case PARTITION_HORZ:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->horizontal[0], mi_row, mi_col,
+                           subsize, output_enabled);
+      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+        set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize);
+        update_state_supertx(cpi, td, &pc_tree->horizontal[1], mi_row + hbs,
+                             mi_col, subsize, output_enabled);
+      }
+      pmc = &pc_tree->horizontal_supertx;
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+        update_state_supertx(cpi, td, pc_tree->leaf_split[0], mi_row, mi_col,
+                             subsize, output_enabled);
+      } else {
+        set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+        update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, subsize,
+                                output_enabled, pc_tree->split[0]);
+        set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize);
+        update_state_sb_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize,
+                                output_enabled, pc_tree->split[1]);
+        set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize);
+        update_state_sb_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize,
+                                output_enabled, pc_tree->split[2]);
+        set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, subsize);
+        update_state_sb_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs,
+                                subsize, output_enabled, pc_tree->split[3]);
+      }
+      pmc = &pc_tree->split_supertx;
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->horizontala[0], mi_row, mi_col,
+                           bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->horizontala[1], mi_row,
+                           mi_col + hbs, bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->horizontala[2], mi_row + hbs,
+                           mi_col, subsize, output_enabled);
+      pmc = &pc_tree->horizontala_supertx;
+      break;
+    case PARTITION_HORZ_B:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->horizontalb[0], mi_row, mi_col,
+                           subsize, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->horizontalb[1], mi_row + hbs,
+                           mi_col, bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->horizontalb[2], mi_row + hbs,
+                           mi_col + hbs, bsize2, output_enabled);
+      pmc = &pc_tree->horizontalb_supertx;
+      break;
+    case PARTITION_VERT_A:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->verticala[0], mi_row, mi_col,
+                           bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->verticala[1], mi_row + hbs,
+                           mi_col, bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize);
+      update_state_supertx(cpi, td, &pc_tree->verticala[2], mi_row,
+                           mi_col + hbs, subsize, output_enabled);
+      pmc = &pc_tree->verticala_supertx;
+      break;
+    case PARTITION_VERT_B:
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize);
+      update_state_supertx(cpi, td, &pc_tree->verticalb[0], mi_row, mi_col,
+                           subsize, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->verticalb[1], mi_row,
+                           mi_col + hbs, bsize2, output_enabled);
+      set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, bsize2);
+      update_state_supertx(cpi, td, &pc_tree->verticalb[2], mi_row + hbs,
+                           mi_col + hbs, bsize2, output_enabled);
+      pmc = &pc_tree->verticalb_supertx;
+      break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0);
+  }
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+    if (pmc != NULL) {
+      p[i].coeff = pmc->coeff[i][1];
+      p[i].qcoeff = pmc->qcoeff[i][1];
+      pd[i].dqcoeff = pmc->dqcoeff[i][1];
+      p[i].eobs = pmc->eobs[i][1];
+    } else {
+      // These should never be used
+      p[i].coeff = NULL;
+      p[i].qcoeff = NULL;
+      pd[i].dqcoeff = NULL;
+      p[i].eobs = NULL;
+    }
+  }
+}
+
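+// Store the chosen supertx transform size and type (and the skip flag) back
+// into the pick-mode context.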
+static void update_supertx_param(ThreadData *td, PICK_MODE_CONTEXT *ctx,
+                                 int best_tx, TX_SIZE supertx_size) {
+  MACROBLOCK *const x = &td->mb;
+#if CONFIG_VAR_TX
+  int i;
+
+  for (i = 0; i < 1; ++i)
+    memcpy(ctx->blk_skip[i], x->blk_skip[i],
+           sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif  // CONFIG_VAR_TX
+  ctx->mic.mbmi.tx_size = supertx_size;
+  ctx->skip = x->skip;
+  ctx->mic.mbmi.tx_type = best_tx;
+}
+
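+// Apply update_supertx_param() to every leaf context in the partition tree
+// of a superblock.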
+static void update_supertx_param_sb(VP10_COMP *cpi, ThreadData *td, int mi_row,
+                                    int mi_col, BLOCK_SIZE bsize, int best_tx,
+                                    TX_SIZE supertx_size, PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
+  PARTITION_TYPE partition = pc_tree->partitioning;
+  BLOCK_SIZE subsize = get_subsize(bsize, partition);
+#if CONFIG_EXT_PARTITION_TYPES
+  int i;
+#endif
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  switch (partition) {
+    case PARTITION_NONE:
+      update_supertx_param(td, &pc_tree->none, best_tx, supertx_size);
+      break;
+    case PARTITION_VERT:
+      update_supertx_param(td, &pc_tree->vertical[0], best_tx, supertx_size);
+      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8)
+        update_supertx_param(td, &pc_tree->vertical[1], best_tx, supertx_size);
+      break;
+    case PARTITION_HORZ:
+      update_supertx_param(td, &pc_tree->horizontal[0], best_tx, supertx_size);
+      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8)
+        update_supertx_param(td, &pc_tree->horizontal[1], best_tx,
+                             supertx_size);
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        update_supertx_param(td, pc_tree->leaf_split[0], best_tx, supertx_size);
+      } else {
+        update_supertx_param_sb(cpi, td, mi_row, mi_col, subsize, best_tx,
+                                supertx_size, pc_tree->split[0]);
+        update_supertx_param_sb(cpi, td, mi_row, mi_col + hbs, subsize, best_tx,
+                                supertx_size, pc_tree->split[1]);
+        update_supertx_param_sb(cpi, td, mi_row + hbs, mi_col, subsize, best_tx,
+                                supertx_size, pc_tree->split[2]);
+        update_supertx_param_sb(cpi, td, mi_row + hbs, mi_col + hbs, subsize,
+                                best_tx, supertx_size, pc_tree->split[3]);
+      }
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      for (i = 0; i < 3; i++)
+        update_supertx_param(td, &pc_tree->horizontala[i], best_tx,
+                             supertx_size);
+      break;
+    case PARTITION_HORZ_B:
+      for (i = 0; i < 3; i++)
+        update_supertx_param(td, &pc_tree->horizontalb[i], best_tx,
+                             supertx_size);
+      break;
+    case PARTITION_VERT_A:
+      for (i = 0; i < 3; i++)
+        update_supertx_param(td, &pc_tree->verticala[i], best_tx, supertx_size);
+      break;
+    case PARTITION_VERT_B:
+      for (i = 0; i < 3; i++)
+        update_supertx_param(td, &pc_tree->verticalb[i], best_tx, supertx_size);
+      break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0);
+  }
+}
+#endif  // CONFIG_SUPERTX
+
+void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+                           int mi_row, int mi_col) {
+  uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
+  const int widths[3] = { src->y_crop_width, src->uv_crop_width,
+                          src->uv_crop_width };
+  const int heights[3] = { src->y_crop_height, src->uv_crop_height,
+                           src->uv_crop_height };
+  const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+  int i;
+
+  // Set current frame pointer.
+  x->e_mbd.cur_buf = src;
+
+  for (i = 0; i < MAX_MB_PLANE; i++)
+    setup_pred_plane(&x->plane[i].src, buffers[i], widths[i], heights[i],
+                     strides[i], mi_row, mi_col, NULL,
+                     x->e_mbd.plane[i].subsampling_x,
+                     x->e_mbd.plane[i].subsampling_y);
+}
+
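+// Re-initialize the quantizers for the given segment and return the
+// rate-distortion multiplier that matches its effective q index.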
+static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+                              int8_t segment_id) {
+  int segment_qindex;
+  VP10_COMMON *const cm = &cpi->common;
+  vp10_init_plane_quantizers(cpi, x, segment_id);
+  vpx_clear_system_state();
+  segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
+}
+
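+// Run the mode search for a single block: set up the offsets and coefficient
+// buffers, apply any AQ-specific rdmult adjustment, pick the best intra or
+// inter mode, and record the resulting rate/distortion in the context.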
+static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+                             MACROBLOCK *const x, int mi_row, int mi_col,
+                             RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                             int *totalrate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                             PARTITION_TYPE partition,
+#endif
+                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                             int64_t best_rd) {
+  VP10_COMMON *const cm = &cpi->common;
+  TileInfo *const tile_info = &tile_data->tile_info;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi;
+  struct macroblock_plane *const p = x->plane;
+  struct macroblockd_plane *const pd = xd->plane;
+  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
+  int i, orig_rdmult;
+
+  vpx_clear_system_state();
+
+  // Use the lower precision, but faster, 32x32 fdct for mode selection.
+  x->use_lp32x32fdct = 1;
+
+  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
+  mbmi = &xd->mi[0]->mbmi;
+  mbmi->sb_type = bsize;
+#if CONFIG_SUPERTX
+  // We set tx_size here as skip blocks would otherwise not set it.
+  // tx_size needs to be set at this point as supertx_enable in
+  // write_modes_sb is computed based on this, and if the garbage in memory
+  // just happens to be the supertx_size, then the packer will code this
+  // block as a supertx block, even if rdopt did not pick it as such.
+  mbmi->tx_size = max_txsize_lookup[bsize];
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+  mbmi->partition = partition;
+#endif
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+    p[i].coeff = ctx->coeff[i][0];
+    p[i].qcoeff = ctx->qcoeff[i][0];
+    pd[i].dqcoeff = ctx->dqcoeff[i][0];
+    p[i].eobs = ctx->eobs[i][0];
+  }
+
+  for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
+
+  ctx->is_coded = 0;
+  ctx->skippable = 0;
+  ctx->pred_pixel_ready = 0;
+
+  // Set to zero to make sure we do not use the previously encoded frame
+  // stats.
+  mbmi->skip = 0;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    x->source_variance = vp10_high_get_sby_perpixel_variance(
+        cpi, &x->plane[0].src, bsize, xd->bd);
+  } else {
+    x->source_variance =
+        vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+  }
+#else
+  x->source_variance =
+      vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // Save rdmult before it might be changed, so it can be restored later.
+  orig_rdmult = x->rdmult;
+
+  if (aq_mode == VARIANCE_AQ) {
+    if (cpi->vaq_refresh) {
+      const int energy = bsize <= BLOCK_16X16
+                             ? x->mb_energy
+                             : vp10_block_energy(cpi, x, bsize);
+      mbmi->segment_id = vp10_vaq_segment_id(energy);
+      // Re-initialise quantiser
+      vp10_init_plane_quantizers(cpi, x, mbmi->segment_id);
+      x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
+    }
+    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
+  } else if (aq_mode == COMPLEXITY_AQ) {
+    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
+  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
+    // If segment is boosted, use rdmult for that segment.
+    if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
+      x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
+  }
+
+  // Find the best coding mode & reconstruct the MB so it is available
+  // as a predictor for MBs that follow in the SB.
+  if (frame_is_intra_only(cm)) {
+    vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+    *totalrate_nocoef = 0;
+#endif  // CONFIG_SUPERTX
+  } else {
+    if (bsize >= BLOCK_8X8) {
+      if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+        vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
+                                            ctx, best_rd);
+#if CONFIG_SUPERTX
+        *totalrate_nocoef = rd_cost->rate;
+#endif  // CONFIG_SUPERTX
+      } else {
+        vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+#if CONFIG_SUPERTX
+                                   totalrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                   bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+        assert(*totalrate_nocoef >= 0);
+#endif  // CONFIG_SUPERTX
+      }
+    } else {
+      if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+        // The decoder rejects sub8x8 partitions when SEG_LVL_SKIP is set.
+        rd_cost->rate = INT_MAX;
+      } else {
+        vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
+                                       rd_cost,
+#if CONFIG_SUPERTX
+                                       totalrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                       bsize, ctx, best_rd);
+#if CONFIG_SUPERTX
+        assert(*totalrate_nocoef >= 0);
+#endif  // CONFIG_SUPERTX
+      }
+    }
+  }
+
+  // Examine the resulting rate and, for AQ mode 2 (complexity AQ), make a
+  // segment choice.
+  if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
+      (bsize >= BLOCK_16X16) &&
+      (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
+       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
+    vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
+  }
+
+  x->rdmult = orig_rdmult;
+
+  // TODO(jingning) The rate-distortion optimization flow needs to be
+  // refactored to provide proper exit/return handling.
+  if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX;
+
+  ctx->rate = rd_cost->rate;
+  ctx->dist = rd_cost->dist;
+}
+
+#if CONFIG_REF_MV
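+// Update the NEWMV/ZEROMV/REFMV mode counts, peeling off the packed mode
+// context flags one group at a time.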
+static void update_inter_mode_stats(FRAME_COUNTS *counts, PREDICTION_MODE mode,
+#if CONFIG_EXT_INTER
+                                    int is_compound,
+#endif  // CONFIG_EXT_INTER
+                                    int16_t mode_context) {
+  int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+#if CONFIG_EXT_INTER
+  if (mode == NEWMV || mode == NEWFROMNEARMV) {
+    if (!is_compound) ++counts->new2mv_mode[mode == NEWFROMNEARMV];
+#else
+  if (mode == NEWMV) {
+#endif  // CONFIG_EXT_INTER
+    ++counts->newmv_mode[mode_ctx][0];
+    return;
+  } else {
+    ++counts->newmv_mode[mode_ctx][1];
+
+    if (mode_context & (1 << ALL_ZERO_FLAG_OFFSET)) {
+      return;
+    }
+
+    mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+    if (mode == ZEROMV) {
+      ++counts->zeromv_mode[mode_ctx][0];
+      return;
+    } else {
+      ++counts->zeromv_mode[mode_ctx][1];
+      mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
+
+      if (mode_context & (1 << SKIP_NEARESTMV_OFFSET)) mode_ctx = 6;
+      if (mode_context & (1 << SKIP_NEARMV_OFFSET)) mode_ctx = 7;
+      if (mode_context & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) mode_ctx = 8;
+
+      ++counts->refmv_mode[mode_ctx][mode != NEARESTMV];
+    }
+  }
+}
+#endif
+
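+// Accumulate entropy-coding counts (intra/inter, reference frames, inter
+// modes, etc.) for the block that was just coded.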
+static void update_stats(VP10_COMMON *cm, ThreadData *td
+#if CONFIG_SUPERTX
+                         ,
+                         int supertx_enabled
+#endif
+                         ) {
+  const MACROBLOCK *x = &td->mb;
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const MODE_INFO *const mi = xd->mi[0];
+  const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+
+  if (!frame_is_intra_only(cm)) {
+    FRAME_COUNTS *const counts = td->counts;
+    const int inter_block = is_inter_block(mbmi);
+    const int seg_ref_active =
+        segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME);
+    if (!seg_ref_active) {
+#if CONFIG_SUPERTX
+      if (!supertx_enabled)
+#endif
+        counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
+      // If the segment reference feature is enabled, we have only a single
+      // reference frame allowed for the segment, so exclude it from the
+      // reference frame counts used to work out probabilities.
+      if (inter_block) {
+        const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
+#if CONFIG_EXT_REFS
+        const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1];
+#endif  // CONFIG_EXT_REFS
+
+        if (cm->reference_mode == REFERENCE_MODE_SELECT)
+          counts->comp_inter[vp10_get_reference_mode_context(
+              cm, xd)][has_second_ref(mbmi)]++;
+
+        if (has_second_ref(mbmi)) {
+#if CONFIG_EXT_REFS
+          const int bit = (ref0 == GOLDEN_FRAME || ref0 == LAST3_FRAME);
+
+          counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)][0][bit]++;
+          if (!bit) {
+            counts->comp_ref[vp10_get_pred_context_comp_ref_p1(
+                cm, xd)][1][ref0 == LAST_FRAME]++;
+          } else {
+            counts->comp_ref[vp10_get_pred_context_comp_ref_p2(
+                cm, xd)][2][ref0 == GOLDEN_FRAME]++;
+          }
+
+          counts->comp_bwdref[vp10_get_pred_context_comp_bwdref_p(
+              cm, xd)][0][ref1 == ALTREF_FRAME]++;
+#else
+          counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+              cm, xd)][0][ref0 == GOLDEN_FRAME]++;
+#endif  // CONFIG_EXT_REFS
+        } else {
+#if CONFIG_EXT_REFS
+          const int bit = (ref0 == ALTREF_FRAME || ref0 == BWDREF_FRAME);
+
+          counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0][bit]++;
+          if (bit) {
+            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+                xd)][1][ref0 != BWDREF_FRAME]++;
+          } else {
+            const int bit1 = !(ref0 == LAST2_FRAME || ref0 == LAST_FRAME);
+            counts->single_ref[vp10_get_pred_context_single_ref_p3(
+                xd)][2][bit1]++;
+            if (!bit1) {
+              counts->single_ref[vp10_get_pred_context_single_ref_p4(
+                  xd)][3][ref0 != LAST_FRAME]++;
+            } else {
+              counts->single_ref[vp10_get_pred_context_single_ref_p5(
+                  xd)][4][ref0 != LAST3_FRAME]++;
+            }
+          }
+#else
+          counts->single_ref[vp10_get_pred_context_single_ref_p1(
+              xd)][0][ref0 != LAST_FRAME]++;
+          if (ref0 != LAST_FRAME) {
+            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+                xd)][1][ref0 != GOLDEN_FRAME]++;
+          }
+#endif  // CONFIG_EXT_REFS
+        }
+
+#if CONFIG_EXT_INTER
+        if (cm->reference_mode != COMPOUND_REFERENCE &&
+#if CONFIG_SUPERTX
+            !supertx_enabled &&
+#endif
+            is_interintra_allowed(mbmi)) {
+          const int bsize_group = size_group_lookup[bsize];
+          if (mbmi->ref_frame[1] == INTRA_FRAME) {
+            counts->interintra[bsize_group][1]++;
+            counts->interintra_mode[bsize_group][mbmi->interintra_mode]++;
+            if (is_interintra_wedge_used(bsize))
+              counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
+          } else {
+            counts->interintra[bsize_group][0]++;
+          }
+        }
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+#if CONFIG_SUPERTX
+        if (!supertx_enabled)
+#endif  // CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+          if (mbmi->ref_frame[1] != INTRA_FRAME)
+#endif  // CONFIG_EXT_INTER
+            if (is_motvar_allowed(mbmi))
+              counts->motvar[mbmi->sb_type][mbmi->motion_variation]++;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
+#if CONFIG_EXT_INTER
+        if (cm->reference_mode != SINGLE_REFERENCE &&
+            is_inter_compound_mode(mbmi->mode) &&
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+            !(is_motvar_allowed(mbmi) &&
+              mbmi->motion_variation != SIMPLE_TRANSLATION) &&
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+            is_interinter_wedge_used(bsize)) {
+          counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
+        }
+#endif  // CONFIG_EXT_INTER
+      }
+    }
+
+    if (inter_block &&
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      int16_t mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
+      if (bsize >= BLOCK_8X8) {
+        const PREDICTION_MODE mode = mbmi->mode;
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+        if (has_second_ref(mbmi)) {
+          mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
+          ++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(mode)];
+        } else {
+#endif  // CONFIG_EXT_INTER
+          mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                                mbmi->ref_frame, bsize, -1);
+          update_inter_mode_stats(counts, mode,
+#if CONFIG_EXT_INTER
+                                  has_second_ref(mbmi),
+#endif  // CONFIG_EXT_INTER
+                                  mode_ctx);
+
+          if (mode == NEWMV) {
+            uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+            int idx;
+
+            for (idx = 0; idx < 2; ++idx) {
+              if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
+                uint8_t drl_ctx =
+                    vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+                ++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx];
+
+                if (mbmi->ref_mv_idx == idx) break;
+              }
+            }
+          }
+
+          if (mode == NEARMV) {
+            uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+            int idx;
+
+            for (idx = 1; idx < 3; ++idx) {
+              if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
+                uint8_t drl_ctx =
+                    vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+                ++counts->drl_mode[drl_ctx][mbmi->ref_mv_idx != idx - 1];
+
+                if (mbmi->ref_mv_idx == idx - 1) break;
+              }
+            }
+          }
+#if CONFIG_EXT_INTER
+        }
+#endif  // CONFIG_EXT_INTER
+#else
+#if CONFIG_EXT_INTER
+        if (is_inter_compound_mode(mode))
+          ++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(mode)];
+        else
+#endif  // CONFIG_EXT_INTER
+          ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
+#endif
+      } else {
+        const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+        const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+        int idx, idy;
+        for (idy = 0; idy < 2; idy += num_4x4_h) {
+          for (idx = 0; idx < 2; idx += num_4x4_w) {
+            const int j = idy * 2 + idx;
+            const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+            if (has_second_ref(mbmi)) {
+              mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
+              ++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(
+                  b_mode)];
+            } else {
+#endif  // CONFIG_EXT_INTER
+              mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                                    mbmi->ref_frame, bsize, j);
+              update_inter_mode_stats(counts, b_mode,
+#if CONFIG_EXT_INTER
+                                      has_second_ref(mbmi),
+#endif  // CONFIG_EXT_INTER
+                                      mode_ctx);
+#if CONFIG_EXT_INTER
+            }
+#endif  // CONFIG_EXT_INTER
+#else
+#if CONFIG_EXT_INTER
+            if (is_inter_compound_mode(b_mode))
+              ++counts->inter_compound_mode[mode_ctx][INTER_COMPOUND_OFFSET(
+                  b_mode)];
+            else
+#endif  // CONFIG_EXT_INTER
+              ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
+#endif
+          }
+        }
+      }
+    }
+  }
+}
+
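+// Snapshot of the above/left entropy, partition (and, with CONFIG_VAR_TX,
+// transform) contexts, used to save and restore state around partition
+// search trials.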
+typedef struct {
+  ENTROPY_CONTEXT a[2 * MAX_MIB_SIZE * MAX_MB_PLANE];
+  ENTROPY_CONTEXT l[2 * MAX_MIB_SIZE * MAX_MB_PLANE];
+  PARTITION_CONTEXT sa[MAX_MIB_SIZE];
+  PARTITION_CONTEXT sl[MAX_MIB_SIZE];
+#if CONFIG_VAR_TX
+  TXFM_CONTEXT *p_ta;
+  TXFM_CONTEXT *p_tl;
+  TXFM_CONTEXT ta[MAX_MIB_SIZE];
+  TXFM_CONTEXT tl[MAX_MIB_SIZE];
+#endif
+} RD_SEARCH_MACROBLOCK_CONTEXT;
+
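+// Restore the above/left contexts captured by save_context().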
+static void restore_context(MACROBLOCK *x,
+                            const RD_SEARCH_MACROBLOCK_CONTEXT *ctx, int mi_row,
+                            int mi_col, BLOCK_SIZE bsize) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  int p;
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+  int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  int mi_height = num_8x8_blocks_high_lookup[bsize];
+  for (p = 0; p < MAX_MB_PLANE; p++) {
+    memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
+           ctx->a + num_4x4_blocks_wide * p,
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+               xd->plane[p].subsampling_x);
+    memcpy(xd->left_context[p] +
+               ((mi_row & MAX_MIB_MASK) * 2 >> xd->plane[p].subsampling_y),
+           ctx->l + num_4x4_blocks_high * p,
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+               xd->plane[p].subsampling_y);
+  }
+  memcpy(xd->above_seg_context + mi_col, ctx->sa,
+         sizeof(*xd->above_seg_context) * mi_width);
+  memcpy(xd->left_seg_context + (mi_row & MAX_MIB_MASK), ctx->sl,
+         sizeof(xd->left_seg_context[0]) * mi_height);
+#if CONFIG_VAR_TX
+  xd->above_txfm_context = ctx->p_ta;
+  xd->left_txfm_context = ctx->p_tl;
+  memcpy(xd->above_txfm_context, ctx->ta,
+         sizeof(*xd->above_txfm_context) * mi_width);
+  memcpy(xd->left_txfm_context, ctx->tl,
+         sizeof(*xd->left_txfm_context) * mi_height);
+#endif
+}
+
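+// Capture the above/left contexts before a partition search trial modifies
+// them.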
+static void save_context(const MACROBLOCK *x, RD_SEARCH_MACROBLOCK_CONTEXT *ctx,
+                         int mi_row, int mi_col, BLOCK_SIZE bsize) {
+  const MACROBLOCKD *xd = &x->e_mbd;
+  int p;
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+  int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+  // Buffer the above/left context information of the block being searched.
+  for (p = 0; p < MAX_MB_PLANE; ++p) {
+    memcpy(ctx->a + num_4x4_blocks_wide * p,
+           xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+               xd->plane[p].subsampling_x);
+    memcpy(ctx->l + num_4x4_blocks_high * p,
+           xd->left_context[p] +
+               ((mi_row & MAX_MIB_MASK) * 2 >> xd->plane[p].subsampling_y),
+           (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+               xd->plane[p].subsampling_y);
+  }
+  memcpy(ctx->sa, xd->above_seg_context + mi_col,
+         sizeof(*xd->above_seg_context) * mi_width);
+  memcpy(ctx->sl, xd->left_seg_context + (mi_row & MAX_MIB_MASK),
+         sizeof(xd->left_seg_context[0]) * mi_height);
+#if CONFIG_VAR_TX
+  memcpy(ctx->ta, xd->above_txfm_context,
+         sizeof(*xd->above_txfm_context) * mi_width);
+  memcpy(ctx->tl, xd->left_txfm_context,
+         sizeof(*xd->left_txfm_context) * mi_height);
+  ctx->p_ta = xd->above_txfm_context;
+  ctx->p_tl = xd->left_txfm_context;
+#endif
+}
+
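+// Encode a single block with the mode that was picked for it, updating the
+// stats when output is enabled.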
+static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
+                     TOKENEXTRA **tp, int mi_row, int mi_col,
+                     int output_enabled, BLOCK_SIZE bsize,
+#if CONFIG_EXT_PARTITION_TYPES
+                     PARTITION_TYPE partition,
+#endif
+                     PICK_MODE_CONTEXT *ctx) {
+  MACROBLOCK *const x = &td->mb;
+  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
+#if CONFIG_EXT_PARTITION_TYPES
+  x->e_mbd.mi[0]->mbmi.partition = partition;
+#endif
+  update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
+  encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
+
+  if (output_enabled) {
+#if CONFIG_SUPERTX
+    update_stats(&cpi->common, td, 0);
+#else
+    update_stats(&cpi->common, td);
+#endif
+  }
+}
+
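+// Recursively encode a superblock following the partitioning stored in
+// pc_tree; when SUPERTX is enabled and applicable, the partitioned block is
+// instead coded with a single transform.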
+static void encode_sb(VP10_COMP *cpi, ThreadData *td,
+                      const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+                      int mi_col, int output_enabled, BLOCK_SIZE bsize,
+                      PC_TREE *pc_tree) {
+  const VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+
+  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
+  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
+  const PARTITION_TYPE partition = pc_tree->partitioning;
+  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
+#if CONFIG_EXT_PARTITION_TYPES
+  const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
+#endif
+
+  assert(bsize >= BLOCK_8X8);
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  if (output_enabled) td->counts->partition[ctx][partition]++;
+
+#if CONFIG_SUPERTX
+  if (!frame_is_intra_only(cm) && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+      partition != PARTITION_NONE && !xd->lossless[0]) {
+    int supertx_enabled;
+    TX_SIZE supertx_size = max_txsize_lookup[bsize];
+    supertx_enabled = check_supertx_sb(bsize, supertx_size, pc_tree);
+    if (supertx_enabled) {
+      const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+      const int mi_height = num_8x8_blocks_high_lookup[bsize];
+      int x_idx, y_idx, i;
+      uint8_t *dst_buf[3];
+      int dst_stride[3];
+      set_skip_context(xd, mi_row, mi_col);
+      set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
+      update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize,
+                              output_enabled, pc_tree);
+
+      vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
+                            mi_col);
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        dst_buf[i] = xd->plane[i].dst.buf;
+        dst_stride[i] = xd->plane[i].dst.stride;
+      }
+      predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row, mi_col,
+                         output_enabled, bsize, bsize, dst_buf, dst_stride,
+                         pc_tree);
+
+      set_offsets_without_segment_id(cpi, tile, x, mi_row, mi_col, bsize);
+      set_segment_id_supertx(cpi, x, mi_row, mi_col, bsize);
+
+      if (!x->skip) {
+        x->skip_optimize = 0;
+        x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
+
+        vp10_encode_sb_supertx(x, bsize);
+        vp10_tokenize_sb_supertx(cpi, td, tp, !output_enabled, bsize);
+      } else {
+        xd->mi[0]->mbmi.skip = 1;
+        if (output_enabled) td->counts->skip[vp10_get_skip_context(xd)][1]++;
+        reset_skip_context(xd, bsize);
+      }
+      if (output_enabled) {
+        for (y_idx = 0; y_idx < mi_height; y_idx++)
+          for (x_idx = 0; x_idx < mi_width; x_idx++) {
+            if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width >
+                    x_idx &&
+                (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height >
+                    y_idx) {
+              xd->mi[x_idx + y_idx * cm->mi_stride]->mbmi.skip =
+                  xd->mi[0]->mbmi.skip;
+            }
+          }
+        td->counts->supertx[partition_supertx_context_lookup[partition]]
+                           [supertx_size][1]++;
+        td->counts->supertx_size[supertx_size]++;
+#if CONFIG_EXT_TX
+        if (get_ext_tx_types(supertx_size, bsize, 1) > 1 &&
+            !xd->mi[0]->mbmi.skip) {
+          int eset = get_ext_tx_set(supertx_size, bsize, 1);
+          if (eset > 0) {
+            ++td->counts
+                  ->inter_ext_tx[eset][supertx_size][xd->mi[0]->mbmi.tx_type];
+          }
+        }
+#else
+        if (supertx_size < TX_32X32 && !xd->mi[0]->mbmi.skip) {
+          ++td->counts->inter_ext_tx[supertx_size][xd->mi[0]->mbmi.tx_type];
+        }
+#endif  // CONFIG_EXT_TX
+      }
+#if CONFIG_EXT_PARTITION_TYPES
+      update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize,
+                                   partition);
+#else
+      if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
+        update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+#endif
+#if CONFIG_VAR_TX
+      set_txfm_ctxs(supertx_size, mi_width, mi_height, xd);
+#endif  // CONFIG_VAR_TX
+      return;
+    } else {
+      if (output_enabled) {
+        td->counts->supertx[partition_supertx_context_lookup[partition]]
+                           [supertx_size][0]++;
+      }
+    }
+  }
+#endif  // CONFIG_SUPERTX
+
+  switch (partition) {
+    case PARTITION_NONE:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+               partition,
+#endif
+               &pc_tree->none);
+      break;
+    case PARTITION_VERT:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+               partition,
+#endif
+               &pc_tree->vertical[0]);
+      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+        encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
+                 subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+                 partition,
+#endif
+                 &pc_tree->vertical[1]);
+      }
+      break;
+    case PARTITION_HORZ:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+               partition,
+#endif
+               &pc_tree->horizontal[0]);
+      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+        encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
+                 subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+                 partition,
+#endif
+                 &pc_tree->horizontal[1]);
+      }
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+#if CONFIG_EXT_PARTITION_TYPES
+                 partition,
+#endif
+                 pc_tree->leaf_split[0]);
+      } else {
+        encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
+                  pc_tree->split[0]);
+        encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
+                  subsize, pc_tree->split[1]);
+        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
+                  subsize, pc_tree->split[2]);
+        encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
+                  subsize, pc_tree->split[3]);
+      }
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, bsize2,
+               partition, &pc_tree->horizontala[0]);
+      encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled, bsize2,
+               partition, &pc_tree->horizontala[1]);
+      encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled, subsize,
+               partition, &pc_tree->horizontala[2]);
+      break;
+    case PARTITION_HORZ_B:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+               partition, &pc_tree->horizontalb[0]);
+      encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled, bsize2,
+               partition, &pc_tree->horizontalb[1]);
+      encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col + hbs, output_enabled,
+               bsize2, partition, &pc_tree->horizontalb[2]);
+      break;
+    case PARTITION_VERT_A:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, bsize2,
+               partition, &pc_tree->verticala[0]);
+      encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled, bsize2,
+               partition, &pc_tree->verticala[1]);
+      encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled, subsize,
+               partition, &pc_tree->verticala[2]);
+
+      break;
+    case PARTITION_VERT_B:
+      encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
+               partition, &pc_tree->verticalb[0]);
+      encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled, bsize2,
+               partition, &pc_tree->verticalb[1]);
+      encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col + hbs, output_enabled,
+               bsize2, partition, &pc_tree->verticalb[2]);
+      break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0 && "Invalid partition type."); break;
+  }
+
+#if CONFIG_EXT_PARTITION_TYPES
+  update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
+#else
+  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
+    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+}
+
+// Check to see if the given partition size is allowed for a specified number
+// of mi block rows and columns remaining in the image.
+// If not, return the largest allowed partition size.
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
+                                      int cols_left, int *bh, int *bw) {
+  if (rows_left <= 0 || cols_left <= 0) {
+    return VPXMIN(bsize, BLOCK_8X8);
+  } else {
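+    // Each -3 step in the BLOCK_SIZE enum moves to the next smaller size of
+    // the same shape (e.g. 64x64 -> 32x32), so this walks down until the
+    // block fits in the remaining rows and columns.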
+    for (; bsize > 0; bsize -= 3) {
+      *bh = num_8x8_blocks_high_lookup[bsize];
+      *bw = num_8x8_blocks_wide_lookup[bsize];
+      if ((*bh <= rows_left) && (*bw <= cols_left)) {
+        break;
+      }
+    }
+  }
+  return bsize;
+}
+
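+// Walk a partial superblock in raster order, assigning each mode-info
+// position the largest partition size that fits in the rows and columns
+// remaining in the image.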
+static void set_partial_sb_partition(const VP10_COMMON *const cm, MODE_INFO *mi,
+                                     int bh_in, int bw_in,
+                                     int mi_rows_remaining,
+                                     int mi_cols_remaining, BLOCK_SIZE bsize,
+                                     MODE_INFO **mib) {
+  int bh = bh_in;
+  int r, c;
+  for (r = 0; r < cm->mib_size; r += bh) {
+    int bw = bw_in;
+    for (c = 0; c < cm->mib_size; c += bw) {
+      const int index = r * cm->mi_stride + c;
+      mib[index] = mi + index;
+      mib[index]->mbmi.sb_type = find_partition_size(
+          bsize, mi_rows_remaining - r, mi_cols_remaining - c, &bh, &bw);
+    }
+  }
+}
+
+// This function attempts to set all mode info entries in a given superblock
+// to the same block partition size.
+// However, at the bottom and right borders of the image the requested size
+// may not be allowed, in which case this code attempts to choose the largest
+// allowable partition.
+static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+                                   MODE_INFO **mib, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int mi_rows_remaining = tile->mi_row_end - mi_row;
+  const int mi_cols_remaining = tile->mi_col_end - mi_col;
+  int block_row, block_col;
+  MODE_INFO *const mi_upper_left = cm->mi + mi_row * cm->mi_stride + mi_col;
+  int bh = num_8x8_blocks_high_lookup[bsize];
+  int bw = num_8x8_blocks_wide_lookup[bsize];
+
+  assert((mi_rows_remaining > 0) && (mi_cols_remaining > 0));
+
+  // Apply the requested partition size to the SB if it is all "in image"
+  if ((mi_cols_remaining >= cm->mib_size) &&
+      (mi_rows_remaining >= cm->mib_size)) {
+    for (block_row = 0; block_row < cm->mib_size; block_row += bh) {
+      for (block_col = 0; block_col < cm->mib_size; block_col += bw) {
+        int index = block_row * cm->mi_stride + block_col;
+        mib[index] = mi_upper_left + index;
+        mib[index]->mbmi.sb_type = bsize;
+      }
+    }
+  } else {
+    // Else this is a partial SB.
+    set_partial_sb_partition(cm, mi_upper_left, bh, bw, mi_rows_remaining,
+                             mi_cols_remaining, bsize, mib);
+  }
+}
+
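+// RD-test the partitioning already stored in mib (e.g. carried over from the
+// previous frame or a fixed pattern): cost the stored partition, optionally
+// compare it against PARTITION_NONE and a one-level split, and keep the
+// cheapest of the candidates.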
+static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+                             TileDataEnc *tile_data, MODE_INFO **mib,
+                             TOKENEXTRA **tp, int mi_row, int mi_col,
+                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
+#if CONFIG_SUPERTX
+                             int *rate_nocoef,
+#endif
+                             int do_recon, PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  TileInfo *const tile_info = &tile_data->tile_info;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const int bs = num_8x8_blocks_wide_lookup[bsize];
+  const int hbs = bs / 2;
+  int i;
+  const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
+  const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
+  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
+  RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
+  RD_COST last_part_rdc, none_rdc, chosen_rdc;
+  BLOCK_SIZE sub_subsize = BLOCK_4X4;
+  int splits_below = 0;
+  BLOCK_SIZE bs_type = mib[0]->mbmi.sb_type;
+  int do_partition_search = 1;
+  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
+#if CONFIG_SUPERTX
+  int last_part_rate_nocoef = INT_MAX;
+  int none_rate_nocoef = INT_MAX;
+  int chosen_rate_nocoef = INT_MAX;
+#endif
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  assert(num_4x4_blocks_wide_lookup[bsize] ==
+         num_4x4_blocks_high_lookup[bsize]);
+
+  vp10_rd_cost_reset(&last_part_rdc);
+  vp10_rd_cost_reset(&none_rdc);
+  vp10_rd_cost_reset(&chosen_rdc);
+
+  pc_tree->partitioning = partition;
+
+#if CONFIG_VAR_TX
+  xd->above_txfm_context = cm->above_txfm_context + mi_col;
+  xd->left_txfm_context =
+      xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+#endif
+
+  save_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+  if (bsize == BLOCK_16X16 && cpi->vaq_refresh) {
+    set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
+    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+  }
+
+  if (do_partition_search &&
+      cpi->sf.partition_search_type == SEARCH_PARTITION &&
+      cpi->sf.adjust_partitioning_from_last_frame) {
+    // Check if any of the sub blocks are further split.
+    if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
+      sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
+      splits_below = 1;
+      for (i = 0; i < 4; i++) {
+        int jj = i >> 1, ii = i & 0x01;
+        MODE_INFO *this_mi = mib[jj * hbs * cm->mi_stride + ii * hbs];
+        if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
+          splits_below = 0;
+        }
+      }
+    }
+
+    // If the partition is not PARTITION_NONE, also try PARTITION_NONE, unless
+    // each of the 4 splits is itself split even further.
+    if (partition != PARTITION_NONE && !splits_below &&
+        mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
+      pc_tree->partitioning = PARTITION_NONE;
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc,
+#if CONFIG_SUPERTX
+                       &none_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_NONE,
+#endif
+                       bsize, ctx, INT64_MAX);
+
+      if (none_rdc.rate < INT_MAX) {
+        none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
+        none_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
+#if CONFIG_SUPERTX
+        none_rate_nocoef += cpi->partition_cost[pl][PARTITION_NONE];
+#endif
+      }
+
+      restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+      mib[0]->mbmi.sb_type = bs_type;
+      pc_tree->partitioning = partition;
+    }
+  }
+
+  switch (partition) {
+    case PARTITION_NONE:
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+                       &last_part_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_NONE,
+#endif
+                       bsize, ctx, INT64_MAX);
+      break;
+    case PARTITION_HORZ:
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+                       &last_part_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_HORZ,
+#endif
+                       subsize, &pc_tree->horizontal[0], INT64_MAX);
+      if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+          mi_row + hbs < cm->mi_rows) {
+        RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+        int rt_nocoef = 0;
+#endif
+        PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
+        vp10_rd_cost_init(&tmp_rdc);
+        update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
+        encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
+        rd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, &tmp_rdc,
+#if CONFIG_SUPERTX
+                         &rt_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                         PARTITION_HORZ,
+#endif
+                         subsize, &pc_tree->horizontal[1], INT64_MAX);
+        if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
+          vp10_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+          last_part_rate_nocoef = INT_MAX;
+#endif
+          break;
+        }
+        last_part_rdc.rate += tmp_rdc.rate;
+        last_part_rdc.dist += tmp_rdc.dist;
+        last_part_rdc.rdcost += tmp_rdc.rdcost;
+#if CONFIG_SUPERTX
+        last_part_rate_nocoef += rt_nocoef;
+#endif
+      }
+      break;
+    case PARTITION_VERT:
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+                       &last_part_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_VERT,
+#endif
+                       subsize, &pc_tree->vertical[0], INT64_MAX);
+      if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+          mi_col + hbs < cm->mi_cols) {
+        RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+        int rt_nocoef = 0;
+#endif
+        PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
+        vp10_rd_cost_init(&tmp_rdc);
+        update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
+        encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
+        rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, &tmp_rdc,
+#if CONFIG_SUPERTX
+                         &rt_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                         PARTITION_VERT,
+#endif
+                         subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
+                         INT64_MAX);
+        if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
+          vp10_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+          last_part_rate_nocoef = INT_MAX;
+#endif
+          break;
+        }
+        last_part_rdc.rate += tmp_rdc.rate;
+        last_part_rdc.dist += tmp_rdc.dist;
+        last_part_rdc.rdcost += tmp_rdc.rdcost;
+#if CONFIG_SUPERTX
+        last_part_rate_nocoef += rt_nocoef;
+#endif
+      }
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
+#if CONFIG_SUPERTX
+                         &last_part_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                         PARTITION_SPLIT,
+#endif
+                         subsize, pc_tree->leaf_split[0], INT64_MAX);
+        break;
+      }
+      last_part_rdc.rate = 0;
+      last_part_rdc.dist = 0;
+      last_part_rdc.rdcost = 0;
+#if CONFIG_SUPERTX
+      last_part_rate_nocoef = 0;
+#endif
+      for (i = 0; i < 4; i++) {
+        int x_idx = (i & 1) * hbs;
+        int y_idx = (i >> 1) * hbs;
+        int jj = i >> 1, ii = i & 0x01;
+        RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+        int rt_nocoef;
+#endif
+        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+          continue;
+
+        vp10_rd_cost_init(&tmp_rdc);
+        rd_use_partition(cpi, td, tile_data,
+                         mib + jj * hbs * cm->mi_stride + ii * hbs, tp,
+                         mi_row + y_idx, mi_col + x_idx, subsize, &tmp_rdc.rate,
+                         &tmp_rdc.dist,
+#if CONFIG_SUPERTX
+                         &rt_nocoef,
+#endif
+                         i != 3, pc_tree->split[i]);
+        if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
+          vp10_rd_cost_reset(&last_part_rdc);
+#if CONFIG_SUPERTX
+          last_part_rate_nocoef = INT_MAX;
+#endif
+          break;
+        }
+        last_part_rdc.rate += tmp_rdc.rate;
+        last_part_rdc.dist += tmp_rdc.dist;
+#if CONFIG_SUPERTX
+        last_part_rate_nocoef += rt_nocoef;
+#endif
+      }
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_VERT_A:
+    case PARTITION_VERT_B:
+    case PARTITION_HORZ_A:
+    case PARTITION_HORZ_B: assert(0 && "Cannot handle extended partition types");
+#endif  //  CONFIG_EXT_PARTITION_TYPES
+    default: assert(0); break;
+  }
+
+  if (last_part_rdc.rate < INT_MAX) {
+    last_part_rdc.rate += cpi->partition_cost[pl][partition];
+    last_part_rdc.rdcost =
+        RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
+#if CONFIG_SUPERTX
+    last_part_rate_nocoef += cpi->partition_cost[pl][partition];
+#endif
+  }
+
+  if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
+      cpi->sf.partition_search_type == SEARCH_PARTITION &&
+      partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
+      (mi_row + bs < cm->mi_rows || mi_row + hbs == cm->mi_rows) &&
+      (mi_col + bs < cm->mi_cols || mi_col + hbs == cm->mi_cols)) {
+    BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
+    chosen_rdc.rate = 0;
+    chosen_rdc.dist = 0;
+#if CONFIG_SUPERTX
+    chosen_rate_nocoef = 0;
+#endif
+
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+    pc_tree->partitioning = PARTITION_SPLIT;
+
+    // Split partition.
+    for (i = 0; i < 4; i++) {
+      int x_idx = (i & 1) * hbs;
+      int y_idx = (i >> 1) * hbs;
+      RD_COST tmp_rdc;
+#if CONFIG_SUPERTX
+      int rt_nocoef = 0;
+#endif
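+      // Shadows the outer x_ctx: the context is saved/restored around each
+      // individual sub-block search.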
+      RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
+
+      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
+        continue;
+
+      save_context(x, &x_ctx, mi_row, mi_col, bsize);
+      pc_tree->split[i]->partitioning = PARTITION_NONE;
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
+                       &tmp_rdc,
+#if CONFIG_SUPERTX
+                       &rt_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_SPLIT,
+#endif
+                       split_subsize, &pc_tree->split[i]->none, INT64_MAX);
+
+      restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+      if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
+        vp10_rd_cost_reset(&chosen_rdc);
+#if CONFIG_SUPERTX
+        chosen_rate_nocoef = INT_MAX;
+#endif
+        break;
+      }
+
+      chosen_rdc.rate += tmp_rdc.rate;
+      chosen_rdc.dist += tmp_rdc.dist;
+#if CONFIG_SUPERTX
+      chosen_rate_nocoef += rt_nocoef;
+#endif
+
+      if (i != 3)
+        encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
+                  split_subsize, pc_tree->split[i]);
+
+      chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
+#if CONFIG_SUPERTX
+      chosen_rate_nocoef += cpi->partition_cost[pl][PARTITION_NONE];
+#endif
+    }
+    if (chosen_rdc.rate < INT_MAX) {
+      chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
+      chosen_rdc.rdcost =
+          RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
+#if CONFIG_SUPERTX
+      chosen_rate_nocoef += cpi->partition_cost[pl][PARTITION_SPLIT];
+#endif
+    }
+  }
+
+  // If last_part is better, set the partitioning to that.
+  if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
+    mib[0]->mbmi.sb_type = bsize;
+    if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
+    chosen_rdc = last_part_rdc;
+#if CONFIG_SUPERTX
+    chosen_rate_nocoef = last_part_rate_nocoef;
+#endif
+  }
+  // If none was better, set the partitioning to that.
+  if (none_rdc.rdcost < chosen_rdc.rdcost) {
+    if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
+    chosen_rdc = none_rdc;
+#if CONFIG_SUPERTX
+    chosen_rate_nocoef = none_rate_nocoef;
+#endif
+  }
+
+  restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+  // We must have chosen a partitioning and encoding or we'll fail later on.
+  // No other opportunities for success.
+  if (bsize == cm->sb_size)
+    assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
+
+  if (do_recon) {
+    int output_enabled = (bsize == cm->sb_size);
+    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+              pc_tree);
+  }
+
+  *rate = chosen_rdc.rate;
+  *dist = chosen_rdc.dist;
+#if CONFIG_SUPERTX
+  *rate_nocoef = chosen_rate_nocoef;
+#endif
+}
+
+/* clang-format off */
+static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
+                            BLOCK_4X4,    //                     4x4
+  BLOCK_4X4,   BLOCK_4X4,   BLOCK_4X4,    //    4x8,    8x4,     8x8
+  BLOCK_4X4,   BLOCK_4X4,   BLOCK_8X8,    //   8x16,   16x8,   16x16
+  BLOCK_8X8,   BLOCK_8X8,   BLOCK_16X16,  //  16x32,  32x16,   32x32
+  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,  //  32x64,  64x32,   64x64
+#if CONFIG_EXT_PARTITION
+  BLOCK_16X16, BLOCK_16X16, BLOCK_16X16   // 64x128, 128x64, 128x128
+#endif  // CONFIG_EXT_PARTITION
+};
+
+static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
+                                  BLOCK_8X8,    //                     4x4
+  BLOCK_16X16,   BLOCK_16X16,   BLOCK_16X16,    //    4x8,    8x4,     8x8
+  BLOCK_32X32,   BLOCK_32X32,   BLOCK_32X32,    //   8x16,   16x8,   16x16
+  BLOCK_64X64,   BLOCK_64X64,   BLOCK_64X64,    //  16x32,  32x16,   32x32
+  BLOCK_LARGEST, BLOCK_LARGEST, BLOCK_LARGEST,  //  32x64,  64x32,   64x64
+#if CONFIG_EXT_PARTITION
+  BLOCK_LARGEST, BLOCK_LARGEST, BLOCK_LARGEST   // 64x128, 128x64, 128x128
+#endif  // CONFIG_EXT_PARTITION
+};
+
+// Next square block size less than or equal to the current block size.
+static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
+                              BLOCK_4X4,    //                     4x4
+  BLOCK_4X4,   BLOCK_4X4,     BLOCK_8X8,    //    4x8,    8x4,     8x8
+  BLOCK_8X8,   BLOCK_8X8,     BLOCK_16X16,  //   8x16,   16x8,   16x16
+  BLOCK_16X16, BLOCK_16X16,   BLOCK_32X32,  //  16x32,  32x16,   32x32
+  BLOCK_32X32, BLOCK_32X32,   BLOCK_64X64,  //  32x64,  64x32,   64x64
+#if CONFIG_EXT_PARTITION
+  BLOCK_64X64, BLOCK_64X64, BLOCK_128X128   // 64x128, 128x64, 128x128
+#endif  // CONFIG_EXT_PARTITION
+};
+/* clang-format on */
+
+// Look at all the mode_info entries for blocks that are part of this
+// partition and find the min and max values for sb_type.
+// At the moment this is designed to work on a superblock but could be
+// adjusted to use a size parameter.
+//
+// The min and max are assumed to have been initialized prior to calling this
+// function so repeat calls can accumulate a min and max of more than one
+// superblock.
+static void get_sb_partition_size_range(const VP10_COMMON *const cm,
+                                        MACROBLOCKD *xd, MODE_INFO **mib,
+                                        BLOCK_SIZE *min_block_size,
+                                        BLOCK_SIZE *max_block_size) {
+  int i, j;
+  int index = 0;
+
+  // Check the sb_type for each block that belongs to this region.
+  for (i = 0; i < cm->mib_size; ++i) {
+    for (j = 0; j < cm->mib_size; ++j) {
+      MODE_INFO *mi = mib[index + j];
+      BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : BLOCK_4X4;
+      *min_block_size = VPXMIN(*min_block_size, sb_type);
+      *max_block_size = VPXMAX(*max_block_size, sb_type);
+    }
+    index += xd->mi_stride;
+  }
+}
+
+// Look at neighboring blocks and set a min and max partition size based on
+// what they chose.
+static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
+                                    MACROBLOCKD *const xd, int mi_row,
+                                    int mi_col, BLOCK_SIZE *min_block_size,
+                                    BLOCK_SIZE *max_block_size) {
+  VP10_COMMON *const cm = &cpi->common;
+  MODE_INFO **mi = xd->mi;
+  const int left_in_image = xd->left_available && mi[-1];
+  const int above_in_image = xd->up_available && mi[-xd->mi_stride];
+  const int mi_rows_remaining = tile->mi_row_end - mi_row;
+  const int mi_cols_remaining = tile->mi_col_end - mi_col;
+  int bh, bw;
+  BLOCK_SIZE min_size = BLOCK_4X4;
+  BLOCK_SIZE max_size = BLOCK_LARGEST;
+
+  // Trap case where we do not have a prediction.
+  if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
+    // Default "min to max" and "max to min"
+    min_size = BLOCK_LARGEST;
+    max_size = BLOCK_4X4;
+
+    // NOTE: each call to get_sb_partition_size_range() uses the previously
+    // passed-in values for min and max as a starting point.
+    // Find the min and max partition sizes used in the previous frame at
+    // this location.
+    if (cm->frame_type != KEY_FRAME) {
+      MODE_INFO **prev_mi =
+          &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
+      get_sb_partition_size_range(cm, xd, prev_mi, &min_size, &max_size);
+    }
+    // Find the min and max partition sizes used in the left superblock
+    if (left_in_image) {
+      MODE_INFO **left_sb_mi = &mi[-cm->mib_size];
+      get_sb_partition_size_range(cm, xd, left_sb_mi, &min_size, &max_size);
+    }
+    // Find the min and max partition sizes used in the above superblock.
+    if (above_in_image) {
+      MODE_INFO **above_sb_mi = &mi[-xd->mi_stride * cm->mib_size];
+      get_sb_partition_size_range(cm, xd, above_sb_mi, &min_size, &max_size);
+    }
+
+    // Adjust observed min and max for "relaxed" auto partition case.
+    if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
+      min_size = min_partition_size[min_size];
+      max_size = max_partition_size[max_size];
+    }
+  }
+
+  // Check border cases where max and min from neighbors may not be legal.
+  max_size = find_partition_size(max_size, mi_rows_remaining, mi_cols_remaining,
+                                 &bh, &bw);
+  min_size = VPXMIN(min_size, max_size);
+
+  // Test for blocks at the edge of the active image.
+  // This may be the actual edge of the image or where there are formatting
+  // bars.
+  if (vp10_active_edge_sb(cpi, mi_row, mi_col)) {
+    min_size = BLOCK_4X4;
+  } else {
+    min_size = VPXMIN(cpi->sf.rd_auto_partition_min_limit, min_size);
+  }
+
+  // When use_square_partition_only is true, make sure at least one square
+  // partition is allowed by selecting the next smaller square size as
+  // *min_block_size.
+  if (cpi->sf.use_square_partition_only) {
+    min_size = VPXMIN(min_size, next_square_size[max_size]);
+  }
+
+  *min_block_size = VPXMIN(min_size, cm->sb_size);
+  *max_block_size = VPXMIN(max_size, cm->sb_size);
+}
+
+// TODO(jingning) refactor functions setting partition search range
+static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+                                int mi_col, BLOCK_SIZE bsize,
+                                BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
+  int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  int mi_height = num_8x8_blocks_high_lookup[bsize];
+  int idx, idy;
+
+  MODE_INFO *mi;
+  const int idx_str = cm->mi_stride * mi_row + mi_col;
+  MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
+  BLOCK_SIZE bs, min_size, max_size;
+
+  min_size = BLOCK_LARGEST;
+  max_size = BLOCK_4X4;
+
+  if (prev_mi) {
+    for (idy = 0; idy < mi_height; ++idy) {
+      for (idx = 0; idx < mi_width; ++idx) {
+        mi = prev_mi[idy * cm->mi_stride + idx];
+        bs = mi ? mi->mbmi.sb_type : bsize;
+        min_size = VPXMIN(min_size, bs);
+        max_size = VPXMAX(max_size, bs);
+      }
+    }
+  }
+
+  if (xd->left_available) {
+    for (idy = 0; idy < mi_height; ++idy) {
+      mi = xd->mi[idy * cm->mi_stride - 1];
+      bs = mi ? mi->mbmi.sb_type : bsize;
+      min_size = VPXMIN(min_size, bs);
+      max_size = VPXMAX(max_size, bs);
+    }
+  }
+
+  if (xd->up_available) {
+    for (idx = 0; idx < mi_width; ++idx) {
+      mi = xd->mi[idx - cm->mi_stride];
+      bs = mi ? mi->mbmi.sb_type : bsize;
+      min_size = VPXMIN(min_size, bs);
+      max_size = VPXMAX(max_size, bs);
+    }
+  }
+
+  if (min_size == max_size) {
+    min_size = min_partition_size[min_size];
+    max_size = max_partition_size[max_size];
+  }
+
+  *min_bs = VPXMIN(min_size, cm->sb_size);
+  *max_bs = VPXMIN(max_size, cm->sb_size);
+}
+
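+// Save and restore the best predicted motion vectors around nested partition
+// searches so that adaptive motion search can reuse them as starting points.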
+static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
+  memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
+}
+
+static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
+  memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
+}
+
+#if CONFIG_FP_MB_STATS
+const int qindex_skip_threshold_lookup[BLOCK_SIZES] = {
+  0,
+  10,
+  10,
+  30,
+  40,
+  40,
+  60,
+  80,
+  80,
+  90,
+  100,
+  100,
+  120,
+#if CONFIG_EXT_PARTITION
+  // TODO(debargha): What are the correct numbers here?
+  130,
+  130,
+  150
+#endif  // CONFIG_EXT_PARTITION
+};
+const int qindex_split_threshold_lookup[BLOCK_SIZES] = {
+  0,
+  3,
+  3,
+  7,
+  15,
+  15,
+  30,
+  40,
+  40,
+  60,
+  80,
+  80,
+  120,
+#if CONFIG_EXT_PARTITION
+  // TODO(debargha): What are the correct numbers here?
+  160,
+  160,
+  240
+#endif  // CONFIG_EXT_PARTITION
+};
+const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = {
+  1,
+  1,
+  1,
+  1,
+  1,
+  1,
+  1,
+  1,
+  1,
+  1,
+  4,
+  4,
+  6,
+#if CONFIG_EXT_PARTITION
+  // TODO(debargha): What are the correct numbers here?
+  8,
+  8,
+  10
+#endif  // CONFIG_EXT_PARTITION
+};
+
+typedef enum {
+  MV_ZERO = 0,
+  MV_LEFT = 1,
+  MV_UP = 2,
+  MV_RIGHT = 3,
+  MV_DOWN = 4,
+  MV_INVALID
+} MOTION_DIRECTION;
+
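+// Map a first-pass macroblock stats byte to a coarse motion direction.
+// The masks are tested in priority order; a byte with none of the other
+// motion flags set maps to MV_DOWN.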
+static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
+  if (fp_byte & FPMB_MOTION_ZERO_MASK) {
+    return MV_ZERO;
+  } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
+    return MV_LEFT;
+  } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
+    return MV_RIGHT;
+  } else if (fp_byte & FPMB_MOTION_UP_MASK) {
+    return MV_UP;
+  } else {
+    return MV_DOWN;
+  }
+}
+
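+// Score the inconsistency between two motion directions: 0 if identical, 2 if
+// the enum values differ by 2 (e.g. LEFT vs RIGHT, UP vs DOWN), 1 otherwise.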
+static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
+                                           MOTION_DIRECTION that_mv) {
+  if (this_mv == that_mv) {
+    return 0;
+  } else {
+    return abs(this_mv - that_mv) == 2 ? 2 : 1;
+  }
+}
+#endif
+
+#if CONFIG_EXT_PARTITION_TYPES
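+// Evaluate one 3-way extended partition (HORZ_A/B or VERT_A/B): RD-search the
+// three sub-blocks in order, accumulating rate/distortion and giving up as
+// soon as a sub-block fails, then update *best_rdc and pc_tree->partitioning
+// if the total cost (including partition signalling) improves on the best.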
+static void rd_test_partition3(
+    VP10_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp,
+    PC_TREE *pc_tree, RD_COST *best_rdc, PICK_MODE_CONTEXT ctxs[3],
+    PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, BLOCK_SIZE bsize,
+    PARTITION_TYPE partition,
+#if CONFIG_SUPERTX
+    int64_t best_rd, int *best_rate_nocoef, RD_SEARCH_MACROBLOCK_CONTEXT *x_ctx,
+#endif
+    int mi_row0, int mi_col0, BLOCK_SIZE subsize0, int mi_row1, int mi_col1,
+    BLOCK_SIZE subsize1, int mi_row2, int mi_col2, BLOCK_SIZE subsize2) {
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  RD_COST this_rdc, sum_rdc;
+#if CONFIG_SUPERTX
+  VP10_COMMON *const cm = &cpi->common;
+  TileInfo *const tile_info = &tile_data->tile_info;
+  int this_rate_nocoef, sum_rate_nocoef;
+  int abort_flag;
+  const int supertx_allowed = !frame_is_intra_only(cm) &&
+                              bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+                              !xd->lossless[0];
+#endif
+  if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+  rd_pick_sb_modes(cpi, tile_data, x, mi_row0, mi_col0, &sum_rdc,
+#if CONFIG_SUPERTX
+                   &sum_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                   partition,
+#endif
+                   subsize0, &ctxs[0], best_rdc->rdcost);
+#if CONFIG_SUPERTX
+  abort_flag = sum_rdc.rdcost >= best_rd;
+#endif
+
+#if CONFIG_SUPERTX
+  if (sum_rdc.rdcost < INT64_MAX) {
+#else
+  if (sum_rdc.rdcost < best_rdc->rdcost) {
+#endif
+    PICK_MODE_CONTEXT *ctx = &ctxs[0];
+    update_state(cpi, td, ctx, mi_row0, mi_col0, subsize0, 0);
+    encode_superblock(cpi, td, tp, 0, mi_row0, mi_col0, subsize0, ctx);
+
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+#if CONFIG_SUPERTX
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row1, mi_col1, &this_rdc,
+                     &this_rate_nocoef,
+#if CONFIG_EXT_PARTITION_TYPES
+                     partition,
+#endif
+                     subsize1, &ctxs[1], INT64_MAX - sum_rdc.rdcost);
+#else
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row1, mi_col1, &this_rdc,
+#if CONFIG_EXT_PARTITION_TYPES
+                     partition,
+#endif
+                     subsize1, &ctxs[1], best_rdc->rdcost - sum_rdc.rdcost);
+#endif  // CONFIG_SUPERTX
+
+    if (this_rdc.rate == INT_MAX) {
+      sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+      sum_rate_nocoef = INT_MAX;
+#endif
+    } else {
+      sum_rdc.rate += this_rdc.rate;
+      sum_rdc.dist += this_rdc.dist;
+      sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+      sum_rate_nocoef += this_rate_nocoef;
+#endif
+    }
+
+#if CONFIG_SUPERTX
+    if (sum_rdc.rdcost < INT64_MAX) {
+#else
+    if (sum_rdc.rdcost < best_rdc->rdcost) {
+#endif
+      PICK_MODE_CONTEXT *ctx = &ctxs[1];
+      update_state(cpi, td, ctx, mi_row1, mi_col1, subsize1, 0);
+      encode_superblock(cpi, td, tp, 0, mi_row1, mi_col1, subsize1, ctx);
+
+      if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+#if CONFIG_SUPERTX
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row2, mi_col2, &this_rdc,
+                       &this_rate_nocoef,
+#if CONFIG_EXT_PARTITION_TYPES
+                       partition,
+#endif
+                       subsize2, &ctxs[2], INT64_MAX - sum_rdc.rdcost);
+#else
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row2, mi_col2, &this_rdc,
+#if CONFIG_EXT_PARTITION_TYPES
+                       partition,
+#endif
+                       subsize2, &ctxs[2], best_rdc->rdcost - sum_rdc.rdcost);
+#endif  // CONFIG_SUPERTX
+
+      if (this_rdc.rate == INT_MAX) {
+        sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef = INT_MAX;
+#endif
+      } else {
+        sum_rdc.rate += this_rdc.rate;
+        sum_rdc.dist += this_rdc.dist;
+        sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef += this_rate_nocoef;
+#endif
+      }
+
+#if CONFIG_SUPERTX
+      if (supertx_allowed && !abort_flag && sum_rdc.rdcost < INT64_MAX) {
+        TX_SIZE supertx_size = max_txsize_lookup[bsize];
+        const PARTITION_TYPE best_partition = pc_tree->partitioning;
+        pc_tree->partitioning = partition;
+        sum_rdc.rate += vp10_cost_bit(
+            cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
+                                [supertx_size],
+            0);
+        sum_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+        if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) {
+          TX_TYPE best_tx = DCT_DCT;
+          RD_COST tmp_rdc = { sum_rate_nocoef, 0, 0 };
+
+          restore_context(x, x_ctx, mi_row, mi_col, bsize);
+
+          rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
+                        &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
+
+          tmp_rdc.rate += vp10_cost_bit(
+              cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
+                                  [supertx_size],
+              1);
+          tmp_rdc.rdcost =
+              RDCOST(x->rdmult, x->rddiv, tmp_rdc.rate, tmp_rdc.dist);
+          if (tmp_rdc.rdcost < sum_rdc.rdcost) {
+            sum_rdc = tmp_rdc;
+            update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx,
+                                    supertx_size, pc_tree);
+          }
+        }
+
+        pc_tree->partitioning = best_partition;
+      }
+#endif  // CONFIG_SUPERTX
+
+      if (sum_rdc.rdcost < best_rdc->rdcost) {
+        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
+        sum_rdc.rate += cpi->partition_cost[pl][partition];
+        sum_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+        sum_rate_nocoef += cpi->partition_cost[pl][partition];
+#endif
+        if (sum_rdc.rdcost < best_rdc->rdcost) {
+#if CONFIG_SUPERTX
+          *best_rate_nocoef = sum_rate_nocoef;
+          assert(*best_rate_nocoef >= 0);
+#endif
+          *best_rdc = sum_rdc;
+          pc_tree->partitioning = partition;
+        }
+      }
+    }
+  }
+}
+#endif  // CONFIG_EXT_PARTITION_TYPES
+
+// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
+// unlikely to be selected depending on previous rate-distortion optimization
+// results, for encoding speed-up.
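+// Recursively search the allowed partitionings (NONE, HORZ, VERT, SPLIT and,
+// when enabled, the extended types) of the block at (mi_row, mi_col) of size
+// bsize, and return the best rate/distortion cost found in *rd_cost.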
+static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
+                              TileDataEnc *tile_data, TOKENEXTRA **tp,
+                              int mi_row, int mi_col, BLOCK_SIZE bsize,
+                              RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                              int *rate_nocoef,
+#endif
+                              int64_t best_rd, PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  TileInfo *const tile_info = &tile_data->tile_info;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
+  RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
+  TOKENEXTRA *tp_orig = *tp;
+  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
+  int i;
+  const int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
+  int *partition_cost = cpi->partition_cost[pl];
+  int tmp_partition_cost[PARTITION_TYPES];
+  BLOCK_SIZE subsize;
+  RD_COST this_rdc, sum_rdc, best_rdc;
+#if CONFIG_SUPERTX
+  int this_rate_nocoef, sum_rate_nocoef = 0, best_rate_nocoef = INT_MAX;
+  int abort_flag;
+  const int supertx_allowed = !frame_is_intra_only(cm) &&
+                              bsize <= MAX_SUPERTX_BLOCK_SIZE &&
+                              !xd->lossless[0];
+#endif  // CONFIG_SUPERTX
+  int do_split = bsize >= BLOCK_8X8;
+  int do_rect = 1;
+#if CONFIG_EXT_PARTITION_TYPES
+  BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
+#endif
+
+  // Override skipping rectangular partition operations for edge blocks
+  const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
+  const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
+  const int xss = x->e_mbd.plane[1].subsampling_x;
+  const int yss = x->e_mbd.plane[1].subsampling_y;
+
+  BLOCK_SIZE min_size = x->min_partition_size;
+  BLOCK_SIZE max_size = x->max_partition_size;
+
+#if CONFIG_FP_MB_STATS
+  unsigned int src_diff_var = UINT_MAX;
+  int none_complexity = 0;
+#endif
+
+  int partition_none_allowed = !force_horz_split && !force_vert_split;
+  int partition_horz_allowed =
+      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+  int partition_vert_allowed =
+      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+  (void)*tp_orig;
+
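+  // At the right/bottom frame borders only a subset of the partitions can be
+  // signalled, so override the partition costs: disallowed types cost
+  // INT_MAX, the choice between the one allowed rectangular type and SPLIT is
+  // coded with a single bit, and SPLIT is free when both splits are forced.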
+  if (force_horz_split || force_vert_split) {
+    tmp_partition_cost[PARTITION_NONE] = INT_MAX;
+
+    if (!force_vert_split) {  // force_horz_split only
+      tmp_partition_cost[PARTITION_VERT] = INT_MAX;
+      tmp_partition_cost[PARTITION_HORZ] =
+          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 0);
+      tmp_partition_cost[PARTITION_SPLIT] =
+          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_HORZ], 1);
+    } else if (!force_horz_split) {  // force_vert_split only
+      tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
+      tmp_partition_cost[PARTITION_VERT] =
+          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 0);
+      tmp_partition_cost[PARTITION_SPLIT] =
+          vp10_cost_bit(cm->fc->partition_prob[pl][PARTITION_VERT], 1);
+    } else {  // force_horz_split && force_vert_split
+      tmp_partition_cost[PARTITION_HORZ] = INT_MAX;
+      tmp_partition_cost[PARTITION_VERT] = INT_MAX;
+      tmp_partition_cost[PARTITION_SPLIT] = 0;
+    }
+
+    partition_cost = tmp_partition_cost;
+  }
+
+#if CONFIG_VAR_TX
+#ifndef NDEBUG
+  // Nothing should rely on the default value of this array (it is just
+  // leftover from encoding the previous block), so set it to a magic number
+  // when debugging.
+  memset(x->blk_skip[0], 234, sizeof(x->blk_skip[0]));
+#endif  // NDEBUG
+#endif  // CONFIG_VAR_TX
+
+  assert(num_8x8_blocks_wide_lookup[bsize] ==
+         num_8x8_blocks_high_lookup[bsize]);
+
+  vp10_rd_cost_init(&this_rdc);
+  vp10_rd_cost_init(&sum_rdc);
+  vp10_rd_cost_reset(&best_rdc);
+  best_rdc.rdcost = best_rd;
+
+  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
+
+  if (bsize == BLOCK_16X16 && cpi->vaq_refresh)
+    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+
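+  // Chessboard-pattern partition search: alternate, per frame, which sub-tree
+  // indices (0 and 3 versus 1 and 2) restrict their 16x16 search range to the
+  // sizes observed in the previous frame and the neighboring blocks.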
+  if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
+    int cb_partition_search_ctrl =
+        ((pc_tree->index == 0 || pc_tree->index == 3) +
+         get_chessboard_index(cm->current_video_frame)) &
+        0x1;
+
+    if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
+      set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
+  }
+
+  // Determine which partition types to search according to the speed
+  // features. The thresholds set here have to be square block sizes.
+  if (cpi->sf.auto_min_max_partition_size) {
+    partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
+    partition_horz_allowed &=
+        ((bsize <= max_size && bsize > min_size) || force_horz_split);
+    partition_vert_allowed &=
+        ((bsize <= max_size && bsize > min_size) || force_vert_split);
+    do_split &= bsize > min_size;
+  }
+  if (cpi->sf.use_square_partition_only) {
+    partition_horz_allowed &= force_horz_split;
+    partition_vert_allowed &= force_vert_split;
+  }
+
+#if CONFIG_VAR_TX
+  xd->above_txfm_context = cm->above_txfm_context + mi_col;
+  xd->left_txfm_context =
+      xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+#endif
+
+  save_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+#if CONFIG_FP_MB_STATS
+  if (cpi->use_fp_mb_stats) {
+    set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
+    src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
+                                                  mi_col, bsize);
+  }
+#endif
+
+#if CONFIG_FP_MB_STATS
+  // Decide whether to split directly and skip searching PARTITION_NONE, based
+  // on the first-pass block statistics.
+  if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
+      partition_none_allowed && src_diff_var > 4 &&
+      cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
+    int mb_row = mi_row >> 1;
+    int mb_col = mi_col >> 1;
+    int mb_row_end =
+        VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+    int mb_col_end =
+        VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+    int r, c;
+
+    // Compute a complexity measure, basically measuring the inconsistency of
+    // the motion vectors obtained from the first pass in the current block.
+    for (r = mb_row; r < mb_row_end; r++) {
+      for (c = mb_col; c < mb_col_end; c++) {
+        const int mb_index = r * cm->mb_cols + c;
+
+        MOTION_DIRECTION this_mv;
+        MOTION_DIRECTION right_mv;
+        MOTION_DIRECTION bottom_mv;
+
+        this_mv =
+            get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
+
+        // to its right
+        if (c != mb_col_end - 1) {
+          right_mv = get_motion_direction_fp(
+              cpi->twopass.this_frame_mb_stats[mb_index + 1]);
+          none_complexity += get_motion_inconsistency(this_mv, right_mv);
+        }
+
+        // to its bottom
+        if (r != mb_row_end - 1) {
+          bottom_mv = get_motion_direction_fp(
+              cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
+          none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
+        }
+
+        // do not count its left and top neighbors to avoid double counting
+      }
+    }
+
+    if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
+      partition_none_allowed = 0;
+    }
+  }
+#endif
+
+  // PARTITION_NONE
+  if (partition_none_allowed) {
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc,
+#if CONFIG_SUPERTX
+                     &this_rate_nocoef,
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+                     PARTITION_NONE,
+#endif
+                     bsize, ctx, best_rdc.rdcost);
+    if (this_rdc.rate != INT_MAX) {
+      if (bsize >= BLOCK_8X8) {
+        this_rdc.rate += partition_cost[PARTITION_NONE];
+        this_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
+#if CONFIG_SUPERTX
+        this_rate_nocoef += partition_cost[PARTITION_NONE];
+#endif
+      }
+
+      if (this_rdc.rdcost < best_rdc.rdcost) {
+        int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
+        int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
+
+        best_rdc = this_rdc;
+#if CONFIG_SUPERTX
+        best_rate_nocoef = this_rate_nocoef;
+        assert(best_rate_nocoef >= 0);
+#endif
+        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
+
+        // Adjust dist breakout threshold according to the partition size.
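+        // The threshold is specified for the largest superblock, so shift it
+        // down by the log2 ratio of the superblock area to this block's area;
+        // the rate threshold below is scaled by the block's log2 pixel count.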
+        dist_breakout_thr >>=
+            (2 * (MAX_SB_SIZE_LOG2 - 2)) -
+            (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
+
+        rate_breakout_thr *= num_pels_log2_lookup[bsize];
+
+        // If all y, u, v transform blocks in this partition are skippable, and
+        // the dist & rate are within the thresholds, the partition search is
+        // terminated for the current branch of the partition search tree.
+        // The dist & rate thresholds are set to 0 at speed 0 to disable the
+        // early termination at that speed.
+        if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] &&
+            (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
+             best_rdc.rate < rate_breakout_thr)) {
+          do_split = 0;
+          do_rect = 0;
+        }
+
+#if CONFIG_FP_MB_STATS
+        // Check whether every 16x16 first-pass block has zero motion and the
+        // corresponding first-pass residue is small enough.
+        // If that is the case, check the difference variance between the
+        // current frame and the last frame. If the variance is small enough,
+        // stop further splitting in RD optimization
+        if (cpi->use_fp_mb_stats && do_split != 0 &&
+            cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
+          int mb_row = mi_row >> 1;
+          int mb_col = mi_col >> 1;
+          int mb_row_end =
+              VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
+          int mb_col_end =
+              VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
+          int r, c;
+
+          int skip = 1;
+          for (r = mb_row; r < mb_row_end; r++) {
+            for (c = mb_col; c < mb_col_end; c++) {
+              const int mb_index = r * cm->mb_cols + c;
+              if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
+                    FPMB_MOTION_ZERO_MASK) ||
+                  !(cpi->twopass.this_frame_mb_stats[mb_index] &
+                    FPMB_ERROR_SMALL_MASK)) {
+                skip = 0;
+                break;
+              }
+            }
+            if (skip == 0) {
+              break;
+            }
+          }
+          if (skip) {
+            if (src_diff_var == UINT_MAX) {
+              set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
+              src_diff_var = get_sby_perpixel_diff_variance(
+                  cpi, &x->plane[0].src, mi_row, mi_col, bsize);
+            }
+            if (src_diff_var < 8) {
+              do_split = 0;
+              do_rect = 0;
+            }
+          }
+        }
+#endif
+      }
+    }
+
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+
+  // store estimated motion vector
+  if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx);
+
+  // PARTITION_SPLIT
+  // TODO(jingning): use the motion vectors given by the above search as
+  // the starting point of motion search in the following partition type check.
+  if (do_split) {
+    subsize = get_subsize(bsize, PARTITION_SPLIT);
+    if (bsize == BLOCK_8X8) {
+      i = 4;
+#if CONFIG_DUAL_FILTER
+      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
+        pc_tree->leaf_split[0]->pred_interp_filter =
+            ctx->mic.mbmi.interp_filter[0];
+#else
+      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
+        pc_tree->leaf_split[0]->pred_interp_filter =
+            ctx->mic.mbmi.interp_filter;
+#endif
+#if CONFIG_SUPERTX
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc,
+                       &sum_rate_nocoef,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_SPLIT,
+#endif
+                       subsize, pc_tree->leaf_split[0], INT64_MAX);
+#else
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_SPLIT,
+#endif
+                       subsize, pc_tree->leaf_split[0], best_rdc.rdcost);
+#endif  // CONFIG_SUPERTX
+      if (sum_rdc.rate == INT_MAX) {
+        sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef = INT_MAX;
+#endif
+      }
+#if CONFIG_SUPERTX
+      if (supertx_allowed && sum_rdc.rdcost < INT64_MAX) {
+        TX_SIZE supertx_size = max_txsize_lookup[bsize];
+        const PARTITION_TYPE best_partition = pc_tree->partitioning;
+
+        pc_tree->partitioning = PARTITION_SPLIT;
+
+        sum_rdc.rate += vp10_cost_bit(
+            cm->fc->supertx_prob[partition_supertx_context_lookup
+                                     [PARTITION_SPLIT]][supertx_size],
+            0);
+        sum_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+        if (is_inter_mode(pc_tree->leaf_split[0]->mic.mbmi.mode)) {
+          TX_TYPE best_tx = DCT_DCT;
+          RD_COST tmp_rdc = { sum_rate_nocoef, 0, 0 };
+
+          restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+          rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
+                        &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
+
+          tmp_rdc.rate += vp10_cost_bit(
+              cm->fc->supertx_prob[partition_supertx_context_lookup
+                                       [PARTITION_SPLIT]][supertx_size],
+              1);
+          tmp_rdc.rdcost =
+              RDCOST(x->rdmult, x->rddiv, tmp_rdc.rate, tmp_rdc.dist);
+          if (tmp_rdc.rdcost < sum_rdc.rdcost) {
+            sum_rdc = tmp_rdc;
+            update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx,
+                                    supertx_size, pc_tree);
+          }
+        }
+
+        pc_tree->partitioning = best_partition;
+      }
+#endif  // CONFIG_SUPERTX
+    } else {
+#if CONFIG_SUPERTX
+      for (i = 0; i < 4 && sum_rdc.rdcost < INT64_MAX; ++i) {
+#else
+      for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
+#endif  // CONFIG_SUPERTX
+        const int x_idx = (i & 1) * mi_step;
+        const int y_idx = (i >> 1) * mi_step;
+
+        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
+          continue;
+
+        if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+        pc_tree->split[i]->index = i;
+#if CONFIG_SUPERTX
+        rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+                          mi_col + x_idx, subsize, &this_rdc, &this_rate_nocoef,
+                          INT64_MAX - sum_rdc.rdcost, pc_tree->split[i]);
+#else
+        rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+                          mi_col + x_idx, subsize, &this_rdc,
+                          best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
+#endif  // CONFIG_SUPERTX
+
+        if (this_rdc.rate == INT_MAX) {
+          sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+          sum_rate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+          break;
+        } else {
+          sum_rdc.rate += this_rdc.rate;
+          sum_rdc.dist += this_rdc.dist;
+          sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+          sum_rate_nocoef += this_rate_nocoef;
+#endif  // CONFIG_SUPERTX
+        }
+      }
+#if CONFIG_SUPERTX
+      if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && i == 4) {
+        TX_SIZE supertx_size = max_txsize_lookup[bsize];
+        const PARTITION_TYPE best_partition = pc_tree->partitioning;
+
+        pc_tree->partitioning = PARTITION_SPLIT;
+
+        sum_rdc.rate += vp10_cost_bit(
+            cm->fc->supertx_prob[partition_supertx_context_lookup
+                                     [PARTITION_SPLIT]][supertx_size],
+            0);
+        sum_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+        if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) {
+          TX_TYPE best_tx = DCT_DCT;
+          RD_COST tmp_rdc = { sum_rate_nocoef, 0, 0 };
+
+          restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+          rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize,
+                        &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree);
+
+          tmp_rdc.rate += vp10_cost_bit(
+              cm->fc->supertx_prob[partition_supertx_context_lookup
+                                       [PARTITION_SPLIT]][supertx_size],
+              1);
+          tmp_rdc.rdcost =
+              RDCOST(x->rdmult, x->rddiv, tmp_rdc.rate, tmp_rdc.dist);
+          if (tmp_rdc.rdcost < sum_rdc.rdcost) {
+            sum_rdc = tmp_rdc;
+            update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx,
+                                    supertx_size, pc_tree);
+          }
+        }
+
+        pc_tree->partitioning = best_partition;
+      }
+#endif  // CONFIG_SUPERTX
+    }
+
+    if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
+      sum_rdc.rate += partition_cost[PARTITION_SPLIT];
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+      sum_rate_nocoef += partition_cost[PARTITION_SPLIT];
+#endif  // CONFIG_SUPERTX
+
+      if (sum_rdc.rdcost < best_rdc.rdcost) {
+        best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+        best_rate_nocoef = sum_rate_nocoef;
+        assert(best_rate_nocoef >= 0);
+#endif  // CONFIG_SUPERTX
+        pc_tree->partitioning = PARTITION_SPLIT;
+      }
+    } else {
+      // Skip the rectangular partition tests when a larger block size gives a
+      // better rd cost.
+      if (cpi->sf.less_rectangular_check) do_rect &= !partition_none_allowed;
+    }
+
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }  // if (do_split)
+
+  // PARTITION_HORZ
+  if (partition_horz_allowed &&
+      (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
+    subsize = get_subsize(bsize, PARTITION_HORZ);
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+#if CONFIG_DUAL_FILTER
+    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+        partition_none_allowed)
+      pc_tree->horizontal[0].pred_interp_filter =
+          ctx->mic.mbmi.interp_filter[0];
+#else
+    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+        partition_none_allowed)
+      pc_tree->horizontal[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+#endif
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc,
+#if CONFIG_SUPERTX
+                     &sum_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+#if CONFIG_EXT_PARTITION_TYPES
+                     PARTITION_HORZ,
+#endif
+                     subsize, &pc_tree->horizontal[0], best_rdc.rdcost);
+
+#if CONFIG_SUPERTX
+    abort_flag = (sum_rdc.rdcost >= best_rd && bsize > BLOCK_8X8) ||
+                 (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8);
+    if (sum_rdc.rdcost < INT64_MAX &&
+#else
+    if (sum_rdc.rdcost < best_rdc.rdcost &&
+#endif  // CONFIG_SUPERTX
+        mi_row + mi_step < cm->mi_rows && bsize > BLOCK_8X8) {
+      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
+      update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
+      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
+
+      if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+#if CONFIG_DUAL_FILTER
+      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+          partition_none_allowed)
+        pc_tree->horizontal[1].pred_interp_filter =
+            ctx->mic.mbmi.interp_filter[0];
+#else
+      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+          partition_none_allowed)
+        pc_tree->horizontal[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+#endif
+#if CONFIG_SUPERTX
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
+                       &this_rate_nocoef,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_HORZ,
+#endif
+                       subsize, &pc_tree->horizontal[1], INT64_MAX);
+#else
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_HORZ,
+#endif
+                       subsize, &pc_tree->horizontal[1],
+                       best_rdc.rdcost - sum_rdc.rdcost);
+#endif  // CONFIG_SUPERTX
+      if (this_rdc.rate == INT_MAX) {
+        sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+      } else {
+        sum_rdc.rate += this_rdc.rate;
+        sum_rdc.dist += this_rdc.dist;
+        sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef += this_rate_nocoef;
+#endif  // CONFIG_SUPERTX
+      }
+    }
+
+#if CONFIG_SUPERTX
+    if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && !abort_flag) {
+      TX_SIZE supertx_size = max_txsize_lookup[bsize];
+      const PARTITION_TYPE best_partition = pc_tree->partitioning;
+
+      pc_tree->partitioning = PARTITION_HORZ;
+
+      sum_rdc.rate += vp10_cost_bit(
+          cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
+                              [supertx_size],
+          0);
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+      if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) {
+        TX_TYPE best_tx = DCT_DCT;
+        RD_COST tmp_rdc = { sum_rate_nocoef, 0, 0 };
+
+        restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+        rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
+                      &tmp_rdc.dist, &best_tx, pc_tree);
+
+        tmp_rdc.rate += vp10_cost_bit(
+            cm->fc
+                ->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]]
+                              [supertx_size],
+            1);
+        tmp_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, tmp_rdc.rate, tmp_rdc.dist);
+        if (tmp_rdc.rdcost < sum_rdc.rdcost) {
+          sum_rdc = tmp_rdc;
+          update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx,
+                                  supertx_size, pc_tree);
+        }
+      }
+
+      pc_tree->partitioning = best_partition;
+    }
+#endif  // CONFIG_SUPERTX
+
+    if (sum_rdc.rdcost < best_rdc.rdcost) {
+      sum_rdc.rate += partition_cost[PARTITION_HORZ];
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+      sum_rate_nocoef += partition_cost[PARTITION_HORZ];
+#endif  // CONFIG_SUPERTX
+      if (sum_rdc.rdcost < best_rdc.rdcost) {
+        best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+        best_rate_nocoef = sum_rate_nocoef;
+        assert(best_rate_nocoef >= 0);
+#endif  // CONFIG_SUPERTX
+        pc_tree->partitioning = PARTITION_HORZ;
+      }
+    }
+
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+
+  // PARTITION_VERT
+  if (partition_vert_allowed &&
+      (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
+    subsize = get_subsize(bsize, PARTITION_VERT);
+
+    if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+#if CONFIG_DUAL_FILTER
+    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+        partition_none_allowed)
+      pc_tree->vertical[0].pred_interp_filter = ctx->mic.mbmi.interp_filter[0];
+#else
+    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+        partition_none_allowed)
+      pc_tree->vertical[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+#endif
+    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc,
+#if CONFIG_SUPERTX
+                     &sum_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+#if CONFIG_EXT_PARTITION_TYPES
+                     PARTITION_VERT,
+#endif
+                     subsize, &pc_tree->vertical[0], best_rdc.rdcost);
+#if CONFIG_SUPERTX
+    abort_flag = (sum_rdc.rdcost >= best_rd && bsize > BLOCK_8X8) ||
+                 (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8);
+    if (sum_rdc.rdcost < INT64_MAX &&
+#else
+    if (sum_rdc.rdcost < best_rdc.rdcost &&
+#endif  // CONFIG_SUPERTX
+        mi_col + mi_step < cm->mi_cols && bsize > BLOCK_8X8) {
+      update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
+      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
+                        &pc_tree->vertical[0]);
+
+      if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
+
+#if CONFIG_DUAL_FILTER
+      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+          partition_none_allowed)
+        pc_tree->vertical[1].pred_interp_filter =
+            ctx->mic.mbmi.interp_filter[0];
+#else
+      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
+          partition_none_allowed)
+        pc_tree->vertical[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+#endif
+#if CONFIG_SUPERTX
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
+                       &this_rate_nocoef,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_VERT,
+#endif
+                       subsize, &pc_tree->vertical[1],
+                       INT64_MAX - sum_rdc.rdcost);
+#else
+      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
+#if CONFIG_EXT_PARTITION_TYPES
+                       PARTITION_VERT,
+#endif
+                       subsize, &pc_tree->vertical[1],
+                       best_rdc.rdcost - sum_rdc.rdcost);
+#endif  // CONFIG_SUPERTX
+      if (this_rdc.rate == INT_MAX) {
+        sum_rdc.rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+      } else {
+        sum_rdc.rate += this_rdc.rate;
+        sum_rdc.dist += this_rdc.dist;
+        sum_rdc.rdcost += this_rdc.rdcost;
+#if CONFIG_SUPERTX
+        sum_rate_nocoef += this_rate_nocoef;
+#endif  // CONFIG_SUPERTX
+      }
+    }
+#if CONFIG_SUPERTX
+    if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && !abort_flag) {
+      TX_SIZE supertx_size = max_txsize_lookup[bsize];
+      const PARTITION_TYPE best_partition = pc_tree->partitioning;
+
+      pc_tree->partitioning = PARTITION_VERT;
+
+      sum_rdc.rate += vp10_cost_bit(
+          cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
+                              [supertx_size],
+          0);
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+
+      if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) {
+        TX_TYPE best_tx = DCT_DCT;
+        RD_COST tmp_rdc = { sum_rate_nocoef, 0, 0 };
+
+        restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+
+        rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate,
+                      &tmp_rdc.dist, &best_tx, pc_tree);
+
+        tmp_rdc.rate += vp10_cost_bit(
+            cm->fc
+                ->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]]
+                              [supertx_size],
+            1);
+        tmp_rdc.rdcost =
+            RDCOST(x->rdmult, x->rddiv, tmp_rdc.rate, tmp_rdc.dist);
+        if (tmp_rdc.rdcost < sum_rdc.rdcost) {
+          sum_rdc = tmp_rdc;
+          update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx,
+                                  supertx_size, pc_tree);
+        }
+      }
+
+      pc_tree->partitioning = best_partition;
+    }
+#endif  // CONFIG_SUPERTX
+
+    if (sum_rdc.rdcost < best_rdc.rdcost) {
+      sum_rdc.rate += partition_cost[PARTITION_VERT];
+      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
+#if CONFIG_SUPERTX
+      sum_rate_nocoef += partition_cost[PARTITION_VERT];
+#endif  // CONFIG_SUPERTX
+      if (sum_rdc.rdcost < best_rdc.rdcost) {
+        best_rdc = sum_rdc;
+#if CONFIG_SUPERTX
+        best_rate_nocoef = sum_rate_nocoef;
+        assert(best_rate_nocoef >= 0);
+#endif  // CONFIG_SUPERTX
+        pc_tree->partitioning = PARTITION_VERT;
+      }
+    }
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+
+#if CONFIG_EXT_PARTITION_TYPES
+  // PARTITION_HORZ_A
+  if (partition_horz_allowed && do_rect && bsize > BLOCK_8X8 &&
+      partition_none_allowed) {
+    subsize = get_subsize(bsize, PARTITION_HORZ_A);
+    rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
+                       pc_tree->horizontala, ctx, mi_row, mi_col, bsize,
+                       PARTITION_HORZ_A,
+#if CONFIG_SUPERTX
+                       best_rd, &best_rate_nocoef, &x_ctx,
+#endif
+                       mi_row, mi_col, bsize2, mi_row, mi_col + mi_step, bsize2,
+                       mi_row + mi_step, mi_col, subsize);
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+  // PARTITION_HORZ_B
+  if (partition_horz_allowed && do_rect && bsize > BLOCK_8X8 &&
+      partition_none_allowed) {
+    subsize = get_subsize(bsize, PARTITION_HORZ_B);
+    rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
+                       pc_tree->horizontalb, ctx, mi_row, mi_col, bsize,
+                       PARTITION_HORZ_B,
+#if CONFIG_SUPERTX
+                       best_rd, &best_rate_nocoef, &x_ctx,
+#endif
+                       mi_row, mi_col, subsize, mi_row + mi_step, mi_col,
+                       bsize2, mi_row + mi_step, mi_col + mi_step, bsize2);
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+  // PARTITION_VERT_A
+  if (partition_vert_allowed && do_rect && bsize > BLOCK_8X8 &&
+      partition_none_allowed) {
+    subsize = get_subsize(bsize, PARTITION_VERT_A);
+    rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
+                       pc_tree->verticala, ctx, mi_row, mi_col, bsize,
+                       PARTITION_VERT_A,
+#if CONFIG_SUPERTX
+                       best_rd, &best_rate_nocoef, &x_ctx,
+#endif
+                       mi_row, mi_col, bsize2, mi_row + mi_step, mi_col, bsize2,
+                       mi_row, mi_col + mi_step, subsize);
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+  // PARTITION_VERT_B
+  if (partition_vert_allowed && do_rect && bsize > BLOCK_8X8 &&
+      partition_none_allowed) {
+    subsize = get_subsize(bsize, PARTITION_VERT_B);
+    rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc,
+                       pc_tree->verticalb, ctx, mi_row, mi_col, bsize,
+                       PARTITION_VERT_B,
+#if CONFIG_SUPERTX
+                       best_rd, &best_rate_nocoef, &x_ctx,
+#endif
+                       mi_row, mi_col, subsize, mi_row, mi_col + mi_step,
+                       bsize2, mi_row + mi_step, mi_col + mi_step, bsize2);
+    restore_context(x, &x_ctx, mi_row, mi_col, bsize);
+  }
+#endif  // CONFIG_EXT_PARTITION_TYPES
+
+  // TODO(jbb): This code was added to avoid a static analysis warning that
+  // best_rd isn't used after this point. It should be refactored so that the
+  // duplicate checks occur in a sub-function and are thus actually used...
+  (void)best_rd;
+  *rd_cost = best_rdc;
+#if CONFIG_SUPERTX
+  *rate_nocoef = best_rate_nocoef;
+#endif  // CONFIG_SUPERTX
+
+  if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
+      pc_tree->index != 3) {
+    int output_enabled = (bsize == cm->sb_size);
+    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+              pc_tree);
+  }
+
+  if (bsize == cm->sb_size) {
+    assert(tp_orig < *tp || (tp_orig == *tp && xd->mi[0]->mbmi.skip));
+    assert(best_rdc.rate < INT_MAX);
+    assert(best_rdc.dist < INT64_MAX);
+  } else {
+    assert(tp_orig == *tp);
+  }
+}
+
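+// Encodes one row of superblocks within a tile. For each SB, partitioning is
+// either fixed (FIXED_PARTITION or skip segments), derived from source
+// variance (VAR_BASED_PARTITION), or found by the full recursive RD search
+// in rd_pick_partition().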
+static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+                             TileDataEnc *tile_data, int mi_row,
+                             TOKENEXTRA **tp) {
+  VP10_COMMON *const cm = &cpi->common;
+  const TileInfo *const tile_info = &tile_data->tile_info;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  SPEED_FEATURES *const sf = &cpi->sf;
+  int mi_col;
+#if CONFIG_EXT_PARTITION
+  const int leaf_nodes = 256;
+#else
+  const int leaf_nodes = 64;
+#endif  // CONFIG_EXT_PARTITION
+
+  // Initialize the left context for the new SB row
+  vp10_zero_left_context(xd);
+
+  // Code each SB in the row
+  for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
+       mi_col += cm->mib_size) {
+    const struct segmentation *const seg = &cm->seg;
+    int dummy_rate;
+    int64_t dummy_dist;
+    RD_COST dummy_rdc;
+#if CONFIG_SUPERTX
+    int dummy_rate_nocoef;
+#endif  // CONFIG_SUPERTX
+    int i;
+    int seg_skip = 0;
+
+    const int idx_str = cm->mi_stride * mi_row + mi_col;
+    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
+    PC_TREE *const pc_root = td->pc_root[cm->mib_size_log2 - MIN_MIB_SIZE_LOG2];
+
+    if (sf->adaptive_pred_interp_filter) {
+      for (i = 0; i < leaf_nodes; ++i)
+        td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
+
+      for (i = 0; i < leaf_nodes; ++i) {
+        td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
+        td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
+        td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
+        td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
+      }
+    }
+
+    vp10_zero(x->pred_mv);
+    pc_root->index = 0;
+
+    if (seg->enabled) {
+      const uint8_t *const map =
+          seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+      int segment_id = get_segment_id(cm, map, cm->sb_size, mi_row, mi_col);
+      seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
+    }
+
+    x->source_variance = UINT_MAX;
+    if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
+      BLOCK_SIZE bsize;
+      set_offsets(cpi, tile_info, x, mi_row, mi_col, cm->sb_size);
+      bsize = seg_skip ? cm->sb_size : sf->always_this_block_size;
+      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size,
+                       &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+                       &dummy_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+                       1, pc_root);
+    } else if (cpi->partition_search_skippable_frame) {
+      BLOCK_SIZE bsize;
+      set_offsets(cpi, tile_info, x, mi_row, mi_col, cm->sb_size);
+      bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
+      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size,
+                       &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+                       &dummy_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+                       1, pc_root);
+    } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
+      choose_partitioning(cpi, td, tile_info, x, mi_row, mi_col);
+      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size,
+                       &dummy_rate, &dummy_dist,
+#if CONFIG_SUPERTX
+                       &dummy_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+                       1, pc_root);
+    } else {
+      // If required, set upper and lower partition size limits.
+      if (sf->auto_min_max_partition_size) {
+        set_offsets(cpi, tile_info, x, mi_row, mi_col, cm->sb_size);
+        rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
+                                &x->min_partition_size, &x->max_partition_size);
+      }
+      rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, cm->sb_size,
+                        &dummy_rdc,
+#if CONFIG_SUPERTX
+                        &dummy_rate_nocoef,
+#endif  // CONFIG_SUPERTX
+                        INT64_MAX, pc_root);
+    }
+  }
+#if CONFIG_ENTROPY
+  if (cm->do_subframe_update &&
+      cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+    if ((mi_row + MI_SIZE) %
+                (MI_SIZE *
+                 VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+            0 &&
+        mi_row + MI_SIZE < cm->mi_rows &&
+        cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
+      TX_SIZE t;
+      SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
+
+      for (t = TX_4X4; t <= TX_32X32; ++t)
+        vp10_full_to_model_counts(cpi->td.counts->coef[t],
+                                  cpi->td.rd_counts.coef_counts[t]);
+      vp10_partial_adapt_probs(cm, mi_row, mi_col);
+      ++cm->coef_probs_update_idx;
+      vp10_copy(subframe_stats->coef_probs_buf[cm->coef_probs_update_idx],
+                cm->fc->coef_probs);
+      vp10_copy(subframe_stats->coef_counts_buf[cm->coef_probs_update_idx],
+                cpi->td.rd_counts.coef_counts);
+      vp10_copy(subframe_stats->eob_counts_buf[cm->coef_probs_update_idx],
+                cm->counts.eob_branch);
+      vp10_fill_token_costs(x->token_costs,
+#if CONFIG_ANS
+                            cm->fc->coef_cdfs,
+#endif  // CONFIG_ANS
+                            cm->fc->coef_probs);
+    }
+  }
+#endif  // CONFIG_ENTROPY
+}
+
+static void init_encode_frame_mb_context(VP10_COMP *cpi) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+
+  // Copy data over into macro block data structures.
+  vp10_setup_src_planes(x, cpi->Source, 0, 0);
+
+  vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+}
+
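+// Returns 1 if at least two reference frames are enabled for this frame (and
+// segment 1 does not force a fixed reference frame), i.e. if compound
+// prediction is possible.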
+static int check_dual_ref_flags(VP10_COMP *cpi) {
+  const int ref_flags = cpi->ref_frame_flags;
+
+  if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
+    return 0;
+  } else {
+    return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG) +
+#if CONFIG_EXT_REFS
+            !!(ref_flags & VPX_LAST2_FLAG) + !!(ref_flags & VPX_LAST3_FLAG) +
+            !!(ref_flags & VPX_BWD_FLAG) +
+#endif  // CONFIG_EXT_REFS
+            !!(ref_flags & VPX_ALT_FLAG)) >= 2;
+  }
+}
+
+#if !CONFIG_VAR_TX
+static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
+  int mi_row, mi_col;
+  const int mis = cm->mi_stride;
+  MODE_INFO **mi_ptr = cm->mi_grid_visible;
+
+  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
+    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
+      if (txsize_sqr_up_map[mi_ptr[mi_col]->mbmi.tx_size] > max_tx_size)
+        mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
+    }
+  }
+}
+#endif
+
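+// Maps the current frame to one of the frame-type categories (INTRA, ALTREF,
+// GOLDEN, LAST) used to index the per-type rd threshold tables.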
+static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
+  if (frame_is_intra_only(&cpi->common)) return INTRA_FRAME;
+#if CONFIG_EXT_REFS
+  // We will not update the golden frame with an internal overlay frame
+  else if ((cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) ||
+           cpi->rc.is_src_frame_ext_arf)
+#else
+  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
+#endif
+    return ALTREF_FRAME;
+  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
+    return GOLDEN_FRAME;
+  else
+    // TODO(zoeliu): To investigate whether a frame_type other than
+    // INTRA/ALTREF/GOLDEN/LAST needs to be specified separately.
+    return LAST_FRAME;
+}
+
+static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
+  if (xd->lossless[0]) return ONLY_4X4;
+  if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
+    return ALLOW_32X32;
+  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
+           cpi->sf.tx_size_search_method == USE_TX_8X8)
+    return TX_MODE_SELECT;
+  else
+    return cpi->common.tx_mode;
+}
+
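+// Lazily (re)allocates the per-tile encoder data when the tile configuration
+// grows (resetting each tile's mode search tables in the process) and carves
+// the token buffer into per-tile regions.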
+void vp10_init_tile_data(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int tile_cols = cm->tile_cols;
+  const int tile_rows = cm->tile_rows;
+  int tile_col, tile_row;
+  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
+  unsigned int tile_tok = 0;
+
+  if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
+    if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
+    CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+                                                   sizeof(*cpi->tile_data)));
+    cpi->allocated_tiles = tile_cols * tile_rows;
+
+    for (tile_row = 0; tile_row < tile_rows; ++tile_row)
+      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
+        TileDataEnc *const tile_data =
+            &cpi->tile_data[tile_row * tile_cols + tile_col];
+        int i, j;
+        for (i = 0; i < BLOCK_SIZES; ++i) {
+          for (j = 0; j < MAX_MODES; ++j) {
+            tile_data->thresh_freq_fact[i][j] = 32;
+            tile_data->mode_map[i][j] = j;
+          }
+        }
+      }
+  }
+
+  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
+    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
+      TileInfo *const tile_info =
+          &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
+      vp10_tile_init(tile_info, cm, tile_row, tile_col);
+
+      cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
+      pre_tok = cpi->tile_tok[tile_row][tile_col];
+      tile_tok = allocated_tokens(*tile_info);
+    }
+  }
+}
+
+void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
+                      int tile_col) {
+  VP10_COMMON *const cm = &cpi->common;
+  TileDataEnc *const this_tile =
+      &cpi->tile_data[tile_row * cm->tile_cols + tile_col];
+  const TileInfo *const tile_info = &this_tile->tile_info;
+  TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
+  int mi_row;
+
+  vp10_zero_above_context(cm, tile_info->mi_col_start, tile_info->mi_col_end);
+
+  // Set up pointers to per thread motion search counters.
+  td->mb.m_search_count_ptr = &td->rd_counts.m_search_count;
+  td->mb.ex_search_count_ptr = &td->rd_counts.ex_search_count;
+
+  for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
+       mi_row += cm->mib_size) {
+    encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
+  }
+
+  cpi->tok_count[tile_row][tile_col] =
+      (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
+  assert(cpi->tok_count[tile_row][tile_col] <= allocated_tokens(*tile_info));
+}
+
+static void encode_tiles(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int tile_col, tile_row;
+
+  vp10_init_tile_data(cpi);
+
+  for (tile_row = 0; tile_row < cm->tile_rows; ++tile_row)
+    for (tile_col = 0; tile_col < cm->tile_cols; ++tile_col)
+      vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col);
+}
+
+#if CONFIG_FP_MB_STATS
+static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
+                            VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
+  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
+                         cm->current_video_frame * cm->MBs * sizeof(uint8_t);
+
+  if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;
+
+  *this_frame_mb_stats = mb_stats_in;
+
+  return 1;
+}
+#endif
+
+#if CONFIG_GLOBAL_MOTION
+#define MIN_TRANS_THRESH 8
+#define GLOBAL_MOTION_ADVANTAGE_THRESH 0.60
+#define GLOBAL_MOTION_MODEL ROTZOOM
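+// Converts the double-precision model parameters in H to the integer
+// fixed-point representation used by the codec: round to nearest via
+// floor(x * 2^prec_bits + 0.5), clamp to the legal range, then rescale by
+// the corresponding decode factor. A model that reduces to a translation
+// smaller than MIN_TRANS_THRESH in both components is zeroed out.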
+static void convert_to_params(double *H, TransformationType type,
+                              Global_Motion_Params *model) {
+  int i;
+  int alpha_present = 0;
+  int n_params = n_trans_model_params[type];
+  model->motion_params.wmmat[0] =
+      (int)floor(H[0] * (1 << GM_TRANS_PREC_BITS) + 0.5);
+  model->motion_params.wmmat[1] =
+      (int)floor(H[1] * (1 << GM_TRANS_PREC_BITS) + 0.5);
+  model->motion_params.wmmat[0] =
+      clamp(model->motion_params.wmmat[0], GM_TRANS_MIN, GM_TRANS_MAX) *
+      GM_TRANS_DECODE_FACTOR;
+  model->motion_params.wmmat[1] =
+      clamp(model->motion_params.wmmat[1], GM_TRANS_MIN, GM_TRANS_MAX) *
+      GM_TRANS_DECODE_FACTOR;
+
+  for (i = 2; i < n_params; ++i) {
+    model->motion_params.wmmat[i] =
+        (int)floor(H[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
+    model->motion_params.wmmat[i] =
+        clamp(model->motion_params.wmmat[i], GM_ALPHA_MIN, GM_ALPHA_MAX) *
+        GM_ALPHA_DECODE_FACTOR;
+    alpha_present |= (model->motion_params.wmmat[i] != 0);
+  }
+
+  if (!alpha_present) {
+    if (abs(model->motion_params.wmmat[0]) < MIN_TRANS_THRESH &&
+        abs(model->motion_params.wmmat[1]) < MIN_TRANS_THRESH) {
+      model->motion_params.wmmat[0] = 0;
+      model->motion_params.wmmat[1] = 0;
+    }
+  }
+}
+
+static void convert_model_to_params(double *H, TransformationType type,
+                                    Global_Motion_Params *model) {
+  // TODO(sarahparker) implement for homography
+  if (type > HOMOGRAPHY) convert_to_params(H, type, model);
+  model->gmtype = get_gmtype(model);
+  model->motion_params.wmtype = gm_to_trans_type(model->gmtype);
+}
+#endif  // CONFIG_GLOBAL_MOTION
+
+static void encode_frame_internal(VP10_COMP *cpi) {
+  ThreadData *const td = &cpi->td;
+  MACROBLOCK *const x = &td->mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  RD_COUNTS *const rdc = &cpi->td.rd_counts;
+  int i;
+
+  x->min_partition_size = VPXMIN(x->min_partition_size, cm->sb_size);
+  x->max_partition_size = VPXMIN(x->max_partition_size, cm->sb_size);
+
+  xd->mi = cm->mi_grid_visible;
+  xd->mi[0] = cm->mi;
+
+  vp10_zero(*td->counts);
+  vp10_zero(rdc->coef_counts);
+  vp10_zero(rdc->comp_pred_diff);
+  rdc->m_search_count = 0;   // Count of motion search hits.
+  rdc->ex_search_count = 0;  // Exhaustive mesh search hits.
+
+#if CONFIG_GLOBAL_MOTION
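+  // For each reference frame, fit a global motion model to the source/ref
+  // pair. The model is kept only if the warp error relative to zero motion
+  // stays below GLOBAL_MOTION_ADVANTAGE_THRESH; otherwise it is reset to
+  // zero motion.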
+  vpx_clear_system_state();
+  vp10_zero(cpi->global_motion_used);
+  if (cpi->common.frame_type == INTER_FRAME && cpi->Source) {
+    YV12_BUFFER_CONFIG *ref_buf;
+    int frame;
+    double H[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
+      ref_buf = get_ref_frame_buffer(cpi, frame);
+      if (ref_buf) {
+        if (compute_global_motion_feature_based(GLOBAL_MOTION_MODEL,
+                                                cpi->Source, ref_buf, H)) {
+          convert_model_to_params(H, GLOBAL_MOTION_MODEL,
+                                  &cm->global_motion[frame]);
+          if (get_gmtype(&cm->global_motion[frame]) > GLOBAL_ZERO) {
+            // Compute the advantage of using gm parameters over zero motion.
+            double erroradvantage = vp10_warp_erroradv(
+                &cm->global_motion[frame].motion_params,
+#if CONFIG_VP9_HIGHBITDEPTH
+                xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH, xd->bd,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                ref_buf->y_buffer, ref_buf->y_width, ref_buf->y_height,
+                ref_buf->y_stride, cpi->Source->y_buffer, 0, 0,
+                cpi->Source->y_width, cpi->Source->y_height,
+                cpi->Source->y_stride, 0, 0, 16, 16);
+            if (erroradvantage > GLOBAL_MOTION_ADVANTAGE_THRESH)
+              // Not enough advantage in using a global model. Make 0.
+              memset(&cm->global_motion[frame], 0,
+                     sizeof(cm->global_motion[frame]));
+          }
+        }
+      }
+    }
+  }
+#endif  // CONFIG_GLOBAL_MOTION
+
+  for (i = 0; i < MAX_SEGMENTS; ++i) {
+    const int qindex = cm->seg.enabled
+                           ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                           : cm->base_qindex;
+    xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
+                      cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
+  }
+
+  if (!cm->seg.enabled && xd->lossless[0]) x->optimize = 0;
+
+  cm->tx_mode = select_tx_mode(cpi, xd);
+  vp10_frame_init_quantizer(cpi);
+
+  vp10_initialize_rd_consts(cpi);
+  vp10_initialize_me_consts(cpi, x, cm->base_qindex);
+  init_encode_frame_mb_context(cpi);
+
+  cm->use_prev_frame_mvs =
+      !cm->error_resilient_mode && cm->width == cm->last_width &&
+      cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
+#if CONFIG_EXT_REFS
+  // NOTE(zoeliu): As cm->prev_frame can neither take a frame with
+  //               show_existing_frame=1 nor a frame that is not used as a
+  //               reference, it is possible that by the time it is referred
+  //               to, the frame buffer it originally pointed to has already
+  //               expired and been reassigned to the current newly coded
+  //               frame. Hence, we need to check whether this is the case,
+  //               and if so, we have 2 choices:
+  //               (1) Simply disable the use of previous frame mvs; or
+  //               (2) Have cm->prev_frame point to one reference frame
+  //                   buffer, e.g. LAST_FRAME.
+  if (cm->use_prev_frame_mvs && !enc_is_ref_frame_buf(cpi, cm->prev_frame)) {
+    // Reassign the LAST_FRAME buffer to cm->prev_frame.
+    const int last_fb_buf_idx = get_ref_frame_buf_idx(cpi, LAST_FRAME);
+    cm->prev_frame = &cm->buffer_pool->frame_bufs[last_fb_buf_idx];
+  }
+#endif  // CONFIG_EXT_REFS
+
+  // Special case: set prev_mi to NULL when the previous mode info
+  // context cannot be used.
+  cm->prev_mi =
+      cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
+
+#if CONFIG_VAR_TX
+#if CONFIG_REF_MV
+  vp10_zero(x->blk_skip_drl);
+#endif
+#endif
+
+  if (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
+      cpi->td.var_root[0] == NULL)
+    vp10_setup_var_tree(&cpi->common, &cpi->td);
+
+  {
+    struct vpx_usec_timer emr_timer;
+    vpx_usec_timer_start(&emr_timer);
+
+#if CONFIG_FP_MB_STATS
+    if (cpi->use_fp_mb_stats) {
+      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
+                       &cpi->twopass.this_frame_mb_stats);
+    }
+#endif
+
+    // If allowed, encode tiles in parallel, with one thread handling one
+    // tile.
+    // TODO(geza.lore): The multi-threaded encoder is not safe with more than
+    // one tile row, as it uses the single above_context et al arrays from
+    // cpi->common.
+    if (VPXMIN(cpi->oxcf.max_threads, cm->tile_cols) > 1 && cm->tile_rows == 1)
+      vp10_encode_tiles_mt(cpi);
+    else
+      encode_tiles(cpi);
+
+    vpx_usec_timer_mark(&emr_timer);
+    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
+  }
+
+#if 0
+  // Keep record of the total distortion this time around for future use
+  cpi->last_frame_distortion = cpi->frame_distortion;
+#endif
+}
+
+void vp10_encode_frame(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+
+  // In the longer term the encoder should be generalized to match the
+  // decoder, allowing compound prediction whenever one of the 3 buffers has
+  // a different sign bias; that buffer then becomes the fixed ref. However,
+  // this requires further work in the rd loop. For now the only supported
+  // encoder-side behavior is where the ALT ref buffer has the opposite sign
+  // bias to the other two.
+  if (!frame_is_intra_only(cm)) {
+    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
+         cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
+        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
+         cm->ref_frame_sign_bias[LAST_FRAME])) {
+      cpi->allow_comp_inter_inter = 0;
+    } else {
+      cpi->allow_comp_inter_inter = 1;
+
+#if CONFIG_EXT_REFS
+      cm->comp_fwd_ref[0] = LAST_FRAME;
+      cm->comp_fwd_ref[1] = LAST2_FRAME;
+      cm->comp_fwd_ref[2] = LAST3_FRAME;
+      cm->comp_fwd_ref[3] = GOLDEN_FRAME;
+      cm->comp_bwd_ref[0] = BWDREF_FRAME;
+      cm->comp_bwd_ref[1] = ALTREF_FRAME;
+#else
+      cm->comp_fixed_ref = ALTREF_FRAME;
+      cm->comp_var_ref[0] = LAST_FRAME;
+      cm->comp_var_ref[1] = GOLDEN_FRAME;
+#endif  // CONFIG_EXT_REFS
+    }
+  } else {
+    cpi->allow_comp_inter_inter = 0;
+  }
+
+  if (cpi->sf.frame_parameter_update) {
+    int i;
+    RD_OPT *const rd_opt = &cpi->rd;
+    FRAME_COUNTS *counts = cpi->td.counts;
+    RD_COUNTS *const rdc = &cpi->td.rd_counts;
+
+    // This code does a single RD pass over the whole frame, assuming
+    // compound, single or hybrid prediction depending on whichever has
+    // worked best for that type of frame in the past.
+    // It also predicts whether another coding mode would have worked better
+    // than this one, and if so remembers that for subsequent frames.
+    // It does the same analysis for transform size selection as well.
+    //
+    // TODO(zoeliu): To investigate whether a frame_type other than
+    // INTRA/ALTREF/GOLDEN/LAST needs to be specified separately.
+    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
+    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
+    const int is_alt_ref = frame_type == ALTREF_FRAME;
+
+    /* prediction (compound, single or hybrid) mode selection */
+    if (is_alt_ref || !cpi->allow_comp_inter_inter)
+      cm->reference_mode = SINGLE_REFERENCE;
+    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
+             mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
+             check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
+      cm->reference_mode = COMPOUND_REFERENCE;
+    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
+      cm->reference_mode = SINGLE_REFERENCE;
+    else
+      cm->reference_mode = REFERENCE_MODE_SELECT;
+
+#if CONFIG_DUAL_FILTER
+    cm->interp_filter = SWITCHABLE;
+#endif
+
+    encode_frame_internal(cpi);
+
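+    // Update the per-frame-type prediction type thresholds as a running
+    // average of the previous value and this frame's per-MB rd cost
+    // difference.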
+    for (i = 0; i < REFERENCE_MODES; ++i)
+      mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;
+
+    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+      int single_count_zero = 0;
+      int comp_count_zero = 0;
+
+      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
+        single_count_zero += counts->comp_inter[i][0];
+        comp_count_zero += counts->comp_inter[i][1];
+      }
+
+      if (comp_count_zero == 0) {
+        cm->reference_mode = SINGLE_REFERENCE;
+        vp10_zero(counts->comp_inter);
+      } else if (single_count_zero == 0) {
+        cm->reference_mode = COMPOUND_REFERENCE;
+        vp10_zero(counts->comp_inter);
+      }
+    }
+
+#if !CONFIG_VAR_TX
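+    // If the RD pass never chose the larger transform sizes, demote tx_mode
+    // from TX_MODE_SELECT to a cheaper fixed mode and cap stale per-block
+    // transform sizes via reset_skip_tx_size().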
+    if (cm->tx_mode == TX_MODE_SELECT) {
+      int count4x4 = 0;
+      int count8x8_lp = 0, count8x8_8x8p = 0;
+      int count16x16_16x16p = 0, count16x16_lp = 0;
+      int count32x32 = 0;
+      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
+        count4x4 += counts->tx_size[0][i][TX_4X4];
+        count4x4 += counts->tx_size[1][i][TX_4X4];
+        count4x4 += counts->tx_size[2][i][TX_4X4];
+
+        count8x8_lp += counts->tx_size[1][i][TX_8X8];
+        count8x8_lp += counts->tx_size[2][i][TX_8X8];
+        count8x8_8x8p += counts->tx_size[0][i][TX_8X8];
+
+        count16x16_16x16p += counts->tx_size[1][i][TX_16X16];
+        count16x16_lp += counts->tx_size[2][i][TX_16X16];
+        count32x32 += counts->tx_size[2][i][TX_32X32];
+      }
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+      count4x4 += counts->tx_size_implied[0][TX_4X4];
+      count4x4 += counts->tx_size_implied[1][TX_4X4];
+      count4x4 += counts->tx_size_implied[2][TX_4X4];
+      count4x4 += counts->tx_size_implied[3][TX_4X4];
+      count8x8_lp += counts->tx_size_implied[2][TX_8X8];
+      count8x8_lp += counts->tx_size_implied[3][TX_8X8];
+      count8x8_8x8p += counts->tx_size_implied[1][TX_8X8];
+      count16x16_lp += counts->tx_size_implied[3][TX_16X16];
+      count16x16_16x16p += counts->tx_size_implied[2][TX_16X16];
+      count32x32 += counts->tx_size_implied[3][TX_32X32];
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
+#if CONFIG_SUPERTX
+          cm->counts.supertx_size[TX_16X16] == 0 &&
+          cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif  // CONFIG_SUPERTX
+          count32x32 == 0) {
+        cm->tx_mode = ALLOW_8X8;
+        reset_skip_tx_size(cm, TX_8X8);
+      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
+                 count8x8_lp == 0 && count16x16_lp == 0 &&
+#if CONFIG_SUPERTX
+                 cm->counts.supertx_size[TX_8X8] == 0 &&
+                 cm->counts.supertx_size[TX_16X16] == 0 &&
+                 cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif  // CONFIG_SUPERTX
+                 count32x32 == 0) {
+        cm->tx_mode = ONLY_4X4;
+        reset_skip_tx_size(cm, TX_4X4);
+      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
+        cm->tx_mode = ALLOW_32X32;
+      } else if (count32x32 == 0 && count8x8_lp == 0 &&
+#if CONFIG_SUPERTX
+                 cm->counts.supertx_size[TX_32X32] == 0 &&
+#endif  // CONFIG_SUPERTX
+                 count4x4 == 0) {
+        cm->tx_mode = ALLOW_16X16;
+        reset_skip_tx_size(cm, TX_16X16);
+      }
+    }
+#endif
+  } else {
+    encode_frame_internal(cpi);
+  }
+}
+
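+// Accumulates intra mode counts for entropy coding. Sub-8x8 blocks count
+// each 4x4 sub-mode; intra-only frames condition the y mode on the
+// above/left neighbour modes, other frames use the block-size group context.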
+static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi,
+                            const MODE_INFO *above_mi, const MODE_INFO *left_mi,
+                            const int intraonly) {
+  const PREDICTION_MODE y_mode = mi->mbmi.mode;
+  const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
+  const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+
+  if (bsize < BLOCK_8X8) {
+    int idx, idy;
+    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+    for (idy = 0; idy < 2; idy += num_4x4_h)
+      for (idx = 0; idx < 2; idx += num_4x4_w) {
+        const int bidx = idy * 2 + idx;
+        const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode;
+        if (intraonly) {
+          const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx);
+          const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx);
+          ++counts->kf_y_mode[a][l][bmode];
+        } else {
+          ++counts->y_mode[0][bmode];
+        }
+      }
+  } else {
+    if (intraonly) {
+      const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, 0);
+      const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0);
+      ++counts->kf_y_mode[above][left][y_mode];
+    } else {
+      ++counts->y_mode[size_group_lookup[bsize]][y_mode];
+    }
+  }
+
+  ++counts->uv_mode[y_mode][uv_mode];
+}
+
+#if CONFIG_VAR_TX
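+// Recursively walks an inter block's transform partition tree, counting at
+// each node whether the block is coded at the current transform size or
+// split further, and updating the above/left transform partition contexts
+// along the way.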
+static void update_txfm_count(MACROBLOCKD *xd, FRAME_COUNTS *counts,
+                              TX_SIZE tx_size, int blk_row, int blk_col) {
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  const int tx_row = blk_row >> 1;
+  const int tx_col = blk_col >> 1;
+  int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  int ctx = txfm_partition_context(xd->above_txfm_context + tx_col,
+                                   xd->left_txfm_context + tx_row, tx_size);
+  const TX_SIZE plane_tx_size = mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (xd->mb_to_bottom_edge < 0) max_blocks_high += xd->mb_to_bottom_edge >> 5;
+  if (xd->mb_to_right_edge < 0) max_blocks_wide += xd->mb_to_right_edge >> 5;
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  if (tx_size == plane_tx_size) {
+    ++counts->txfm_partition[ctx][0];
+    mbmi->tx_size = tx_size;
+    txfm_partition_update(xd->above_txfm_context + tx_col,
+                          xd->left_txfm_context + tx_row, tx_size);
+  } else {
+    BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+    int bh = num_4x4_blocks_high_lookup[bsize];
+    int i;
+    ++counts->txfm_partition[ctx][1];
+
+    if (tx_size == TX_8X8) {
+      mbmi->inter_tx_size[tx_row][tx_col] = TX_4X4;
+      mbmi->tx_size = TX_4X4;
+      txfm_partition_update(xd->above_txfm_context + tx_col,
+                            xd->left_txfm_context + tx_row, TX_4X4);
+      return;
+    }
+
+    for (i = 0; i < 4; ++i) {
+      int offsetr = (i >> 1) * bh / 2;
+      int offsetc = (i & 0x01) * bh / 2;
+      update_txfm_count(xd, counts, tx_size - 1, blk_row + offsetr,
+                        blk_col + offsetc);
+    }
+  }
+}
+
+static void tx_partition_count_update(VP10_COMMON *cm, MACROBLOCKD *xd,
+                                      BLOCK_SIZE plane_bsize, int mi_row,
+                                      int mi_col, FRAME_COUNTS *td_counts) {
+  const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+  const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+  TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+  BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+  int bh = num_4x4_blocks_wide_lookup[txb_size];
+  int idx, idy;
+
+  xd->above_txfm_context = cm->above_txfm_context + mi_col;
+  xd->left_txfm_context =
+      xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+
+  for (idy = 0; idy < mi_height; idy += bh)
+    for (idx = 0; idx < mi_width; idx += bh)
+      update_txfm_count(xd, td_counts, max_tx_size, idy, idx);
+}
+
+static void set_txfm_context(MACROBLOCKD *xd, TX_SIZE tx_size, int blk_row,
+                             int blk_col) {
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  const int tx_row = blk_row >> 1;
+  const int tx_col = blk_col >> 1;
+  int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  const TX_SIZE plane_tx_size = mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (xd->mb_to_bottom_edge < 0) max_blocks_high += xd->mb_to_bottom_edge >> 5;
+  if (xd->mb_to_right_edge < 0) max_blocks_wide += xd->mb_to_right_edge >> 5;
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  if (tx_size == plane_tx_size) {
+    mbmi->tx_size = tx_size;
+    txfm_partition_update(xd->above_txfm_context + tx_col,
+                          xd->left_txfm_context + tx_row, tx_size);
+
+  } else {
+    BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+
+    if (tx_size == TX_8X8) {
+      mbmi->inter_tx_size[tx_row][tx_col] = TX_4X4;
+      mbmi->tx_size = TX_4X4;
+      txfm_partition_update(xd->above_txfm_context + tx_col,
+                            xd->left_txfm_context + tx_row, TX_4X4);
+      return;
+    }
+
+    assert(bsl > 0);
+    --bsl;
+    for (i = 0; i < 4; ++i) {
+      int offsetr = (i >> 1) << bsl;
+      int offsetc = (i & 0x01) << bsl;
+      set_txfm_context(xd, tx_size - 1, blk_row + offsetr, blk_col + offsetc);
+    }
+  }
+}
+
+static void tx_partition_set_contexts(VP10_COMMON *cm, MACROBLOCKD *xd,
+                                      BLOCK_SIZE plane_bsize, int mi_row,
+                                      int mi_col) {
+  const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+  const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+  TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+  BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+  int bh = num_4x4_blocks_wide_lookup[txb_size];
+  int idx, idy;
+
+  xd->above_txfm_context = cm->above_txfm_context + mi_col;
+  xd->left_txfm_context =
+      xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
+
+  for (idy = 0; idy < mi_height; idy += bh)
+    for (idx = 0; idx < mi_width; idx += bh)
+      set_txfm_context(xd, max_tx_size, idy, idx);
+}
+#endif
+
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                              int output_enabled, int mi_row, int mi_col,
+                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO **mi_8x8 = xd->mi;
+  MODE_INFO *mi = mi_8x8[0];
+  MB_MODE_INFO *mbmi = &mi->mbmi;
+  const int seg_skip =
+      segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
+  const int mis = cm->mi_stride;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+
+  x->skip_optimize = ctx->is_coded;
+  ctx->is_coded = 1;
+  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
+
+  if (!is_inter_block(mbmi)) {
+    int plane;
+    mbmi->skip = 1;
+    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
+      vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1);
+    if (output_enabled)
+      sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
+                      frame_is_intra_only(cm));
+
+#if CONFIG_EXT_INTRA
+    if (output_enabled && bsize >= BLOCK_8X8) {
+      FRAME_COUNTS *counts = td->counts;
+      if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0)
+        ++counts->ext_intra[0][mbmi->ext_intra_mode_info.use_ext_intra_mode[0]];
+      if (mbmi->uv_mode == DC_PRED &&
+          mbmi->palette_mode_info.palette_size[1] == 0)
+        ++counts->ext_intra[1][mbmi->ext_intra_mode_info.use_ext_intra_mode[1]];
+      if (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED) {
+        int p_angle;
+        const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+        p_angle =
+            mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+        if (vp10_is_intra_filter_switchable(p_angle))
+          ++counts->intra_filter[intra_filter_ctx][mbmi->intra_filter];
+      }
+    }
+#endif  // CONFIG_EXT_INTRA
+
+    if (bsize >= BLOCK_8X8 && output_enabled) {
+      for (plane = 0; plane <= 1; ++plane) {
+        if (mbmi->palette_mode_info.palette_size[plane] > 0) {
+          mbmi->palette_mode_info.palette_first_color_idx[plane] =
+              xd->plane[plane].color_index_map[0];
+          // TODO(huisu): this increases token buffer use. Needs a stress
+          // test to verify.
+          vp10_tokenize_palette_sb(td, bsize, plane, t);
+        }
+      }
+    }
+    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+  } else {
+    int ref;
+    const int is_compound = has_second_ref(mbmi);
+
+    set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
+    for (ref = 0; ref < 1 + is_compound; ++ref) {
+      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
+      assert(cfg != NULL);
+      vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
+                            &xd->block_refs[ref]->sf);
+    }
+    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
+      vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
+                                      VPXMAX(bsize, BLOCK_8X8));
+
+    vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col,
+                                     VPXMAX(bsize, BLOCK_8X8));
+
+#if CONFIG_OBMC
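+    // For OBMC, build secondary predictions from the above and left
+    // neighbours' motion into temporary buffers, then blend them with the
+    // primary inter prediction built above.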
+    if (mbmi->motion_variation == OBMC_CAUSAL) {
+#if CONFIG_VP9_HIGHBITDEPTH
+      DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+      DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+#else
+      DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
+      DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
+      int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+      int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+      int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+      int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+      int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+      int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+
+      assert(mbmi->sb_type >= BLOCK_8X8);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        int len = sizeof(uint16_t);
+        dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
+        dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * len);
+        dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * 2 * len);
+        dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
+        dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
+        dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
+      } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        dst_buf1[0] = tmp_buf1;
+        dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
+        dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
+        dst_buf2[0] = tmp_buf2;
+        dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
+        dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
+#if CONFIG_VP9_HIGHBITDEPTH
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+                                           dst_width1, dst_height1,
+                                           dst_stride1);
+      vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+                                          dst_width2, dst_height2, dst_stride2);
+      vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
+                            mi_col);
+      vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                       dst_stride1, dst_buf2, dst_stride2);
+    }
+#endif  // CONFIG_OBMC
+
+    vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
+#if CONFIG_VAR_TX
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    if (mbmi->tx_size >= TX_SIZES)
+      vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+    else
+#endif
+      vp10_tokenize_sb_inter(cpi, td, t, !output_enabled, mi_row, mi_col,
+                             VPXMAX(bsize, BLOCK_8X8));
+#else
+    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+#endif
+  }
+
+  if (output_enabled) {
+    if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 &&
+        !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
+      const int is_inter = is_inter_block(mbmi);
+      const int tx_size_ctx = get_tx_size_context(xd);
+      const int tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
+                                       : intra_tx_size_cat_lookup[bsize];
+      const TX_SIZE coded_tx_size = txsize_sqr_up_map[mbmi->tx_size];
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+      assert(IMPLIES(is_rect_tx(mbmi->tx_size), is_rect_tx_allowed(mbmi)));
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+#if CONFIG_VAR_TX
+      if (is_inter)
+        tx_partition_count_update(cm, xd, bsize, mi_row, mi_col, td->counts);
+#endif
+      ++td->counts->tx_size[tx_size_cat][tx_size_ctx][coded_tx_size];
+    } else {
+      int x, y;
+      TX_SIZE tx_size;
+      // The new intra coding scheme requires no change of transform size
+      if (is_inter_block(&mi->mbmi)) {
+        if (xd->lossless[mbmi->segment_id]) {
+          tx_size = TX_4X4;
+        } else {
+          tx_size = tx_size_from_tx_mode(bsize, cm->tx_mode, 1);
+        }
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+        ++td->counts->tx_size_implied[max_txsize_lookup[bsize]]
+                                     [txsize_sqr_up_map[mbmi->tx_size]];
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+      } else {
+        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
+      }
+
+      for (y = 0; y < mi_height; y++)
+        for (x = 0; x < mi_width; x++)
+          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
+            mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;
+    }
+    ++td->counts->tx_size_totals[txsize_sqr_map[mbmi->tx_size]];
+    ++td->counts->tx_size_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(mbmi->tx_size, bsize, is_inter_block(mbmi)) > 1 &&
+        cm->base_qindex > 0 && !mbmi->skip &&
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      int eset = get_ext_tx_set(mbmi->tx_size, bsize, is_inter_block(mbmi));
+      if (eset > 0) {
+        if (is_inter_block(mbmi)) {
+          ++td->counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
+                                    [mbmi->tx_type];
+        } else {
+          ++td->counts
+                ->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode][mbmi->tx_type];
+        }
+      }
+    }
+#else
+    if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
+        !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+      if (is_inter_block(mbmi)) {
+        ++td->counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
+      } else {
+        ++td->counts
+              ->intra_ext_tx[mbmi->tx_size][intra_mode_to_tx_type_context
+                                                [mbmi->mode]][mbmi->tx_type];
+      }
+    }
+#endif  // CONFIG_EXT_TX
+  }
+
+#if CONFIG_VAR_TX
+  if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 &&
+      is_inter_block(mbmi) && !(mbmi->skip || seg_skip)) {
+    if (!output_enabled)
+      tx_partition_set_contexts(cm, xd, bsize, mi_row, mi_col);
+  } else {
+    TX_SIZE tx_size;
+    // The new intra coding scheme requires no change of transform size
+    if (is_inter_block(mbmi))
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    {
+      tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+                       max_txsize_lookup[bsize]);
+      if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
+        tx_size = max_txsize_rect_lookup[bsize];
+      if (xd->lossless[mbmi->segment_id]) tx_size = TX_4X4;
+    }
+#else
+      tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
+                       max_txsize_lookup[bsize]);
+#endif
+    else
+      tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
+    mbmi->tx_size = tx_size;
+    set_txfm_ctxs(tx_size, xd->n8_w, xd->n8_h, xd);
+  }
+#endif
+}
+
+#if CONFIG_SUPERTX
+static int check_intra_b(PICK_MODE_CONTEXT *ctx) {
+  if (!is_inter_mode(ctx->mic.mbmi.mode)) return 1;
+#if CONFIG_EXT_INTER
+  if (ctx->mic.mbmi.ref_frame[1] == INTRA_FRAME) return 1;
+#endif  // CONFIG_EXT_INTER
+  return 0;
+}
+
+static int check_intra_sb(VP10_COMP *cpi, const TileInfo *const tile,
+                          int mi_row, int mi_col, BLOCK_SIZE bsize,
+                          PC_TREE *pc_tree) {
+  const VP10_COMMON *const cm = &cpi->common;
+
+  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
+  const PARTITION_TYPE partition = pc_tree->partitioning;
+  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
+#if CONFIG_EXT_PARTITION_TYPES
+  int i;
+#endif
+
+  assert(bsize >= BLOCK_8X8);
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return 1;
+
+  switch (partition) {
+    case PARTITION_NONE: return check_intra_b(&pc_tree->none);
+    case PARTITION_VERT:
+      if (check_intra_b(&pc_tree->vertical[0])) return 1;
+      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
+        if (check_intra_b(&pc_tree->vertical[1])) return 1;
+      }
+      break;
+    case PARTITION_HORZ:
+      if (check_intra_b(&pc_tree->horizontal[0])) return 1;
+      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
+        if (check_intra_b(&pc_tree->horizontal[1])) return 1;
+      }
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        if (check_intra_b(pc_tree->leaf_split[0])) return 1;
+      } else {
+        if (check_intra_sb(cpi, tile, mi_row, mi_col, subsize,
+                           pc_tree->split[0]))
+          return 1;
+        if (check_intra_sb(cpi, tile, mi_row, mi_col + hbs, subsize,
+                           pc_tree->split[1]))
+          return 1;
+        if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col, subsize,
+                           pc_tree->split[2]))
+          return 1;
+        if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col + hbs, subsize,
+                           pc_tree->split[3]))
+          return 1;
+      }
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      for (i = 0; i < 3; i++) {
+        if (check_intra_b(&pc_tree->horizontala[i])) return 1;
+      }
+      break;
+    case PARTITION_HORZ_B:
+      for (i = 0; i < 3; i++) {
+        if (check_intra_b(&pc_tree->horizontalb[i])) return 1;
+      }
+      break;
+    case PARTITION_VERT_A:
+      for (i = 0; i < 3; i++) {
+        if (check_intra_b(&pc_tree->verticala[i])) return 1;
+      }
+      break;
+    case PARTITION_VERT_B:
+      for (i = 0; i < 3; i++) {
+        if (check_intra_b(&pc_tree->verticalb[i])) return 1;
+      }
+      break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0);
+  }
+  return 0;
+}
+
+static int check_supertx_b(TX_SIZE supertx_size, PICK_MODE_CONTEXT *ctx) {
+  return ctx->mic.mbmi.tx_size == supertx_size;
+}
+
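+// Returns 1 if the first prediction block under this partition node was
+// coded with a transform of exactly supertx_size; only the first sub-block
+// of each partition is inspected, recursing through PARTITION_SPLIT.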
+static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size,
+                            PC_TREE *pc_tree) {
+  PARTITION_TYPE partition;
+  BLOCK_SIZE subsize;
+
+  partition = pc_tree->partitioning;
+  subsize = get_subsize(bsize, partition);
+  switch (partition) {
+    case PARTITION_NONE: return check_supertx_b(supertx_size, &pc_tree->none);
+    case PARTITION_VERT:
+      return check_supertx_b(supertx_size, &pc_tree->vertical[0]);
+    case PARTITION_HORZ:
+      return check_supertx_b(supertx_size, &pc_tree->horizontal[0]);
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8)
+        return check_supertx_b(supertx_size, pc_tree->leaf_split[0]);
+      else
+        return check_supertx_sb(subsize, supertx_size, pc_tree->split[0]);
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      return check_supertx_b(supertx_size, &pc_tree->horizontala[0]);
+    case PARTITION_HORZ_B:
+      return check_supertx_b(supertx_size, &pc_tree->horizontalb[0]);
+    case PARTITION_VERT_A:
+      return check_supertx_b(supertx_size, &pc_tree->verticala[0]);
+    case PARTITION_VERT_B:
+      return check_supertx_b(supertx_size, &pc_tree->verticalb[0]);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0); return 0;
+  }
+}
+
+static void predict_superblock(VP10_COMP *cpi, ThreadData *td,
+#if CONFIG_EXT_INTER
+                               int mi_row_ori, int mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                               int mi_row_pred, int mi_col_pred,
+                               BLOCK_SIZE bsize_pred, int b_sub8x8, int block) {
+  // Used in supertx
+  // (mi_row_ori, mi_col_ori): location for mv
+  // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *mi_8x8 = xd->mi[0];
+  MODE_INFO *mi = mi_8x8;
+  MB_MODE_INFO *mbmi = &mi->mbmi;
+  int ref;
+  const int is_compound = has_second_ref(mbmi);
+
+  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
+
+  for (ref = 0; ref < 1 + is_compound; ++ref) {
+    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
+    vp10_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred,
+                          &xd->block_refs[ref]->sf);
+  }
+
+  if (!b_sub8x8)
+    vp10_build_inter_predictors_sb_extend(xd,
+#if CONFIG_EXT_INTER
+                                          mi_row_ori, mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                                          mi_row_pred, mi_col_pred, bsize_pred);
+  else
+    vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+#if CONFIG_EXT_INTER
+                                                 mi_row_ori, mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                                                 mi_row_pred, mi_col_pred,
+                                                 bsize_pred, block);
+}
+
+static void predict_b_extend(VP10_COMP *cpi, ThreadData *td,
+                             const TileInfo *const tile, int block,
+                             int mi_row_ori, int mi_col_ori, int mi_row_pred,
+                             int mi_col_pred, int mi_row_top, int mi_col_top,
+                             uint8_t *dst_buf[3], int dst_stride[3],
+                             BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred,
+                             int output_enabled, int b_sub8x8, int bextend) {
+  // Used in supertx
+  // (mi_row_ori, mi_col_ori): location for mv
+  // (mi_row_pred, mi_col_pred, bsize_pred): region to predict
+  // (mi_row_top, mi_col_top, bsize_top): region of the top partition size
+  // block: index of the sub8x8 block within its 8x8 parent (used only when
+  //        b_sub8x8 is set)
+  // b_sub8x8: 1: ori is sub8x8; 0: ori is not sub8x8
+  // bextend: 1: region to predict is an extension of ori; 0: not
+
+  MACROBLOCK *const x = &td->mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int r = (mi_row_pred - mi_row_top) * MI_SIZE;
+  int c = (mi_col_pred - mi_col_top) * MI_SIZE;
+  const int mi_width_top = num_8x8_blocks_wide_lookup[bsize_top];
+  const int mi_height_top = num_8x8_blocks_high_lookup[bsize_top];
+
+  if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
+      mi_row_pred >= mi_row_top + mi_height_top ||
+      mi_col_pred >= mi_col_top + mi_width_top || mi_row_pred >= cm->mi_rows ||
+      mi_col_pred >= cm->mi_cols)
+    return;
+
+  set_offsets_extend(cpi, td, tile, mi_row_pred, mi_col_pred, mi_row_ori,
+                     mi_col_ori, bsize_pred);
+  xd->plane[0].dst.stride = dst_stride[0];
+  xd->plane[1].dst.stride = dst_stride[1];
+  xd->plane[2].dst.stride = dst_stride[2];
+  xd->plane[0].dst.buf = dst_buf[0] +
+                         (r >> xd->plane[0].subsampling_y) * dst_stride[0] +
+                         (c >> xd->plane[0].subsampling_x);
+  xd->plane[1].dst.buf = dst_buf[1] +
+                         (r >> xd->plane[1].subsampling_y) * dst_stride[1] +
+                         (c >> xd->plane[1].subsampling_x);
+  xd->plane[2].dst.buf = dst_buf[2] +
+                         (r >> xd->plane[2].subsampling_y) * dst_stride[2] +
+                         (c >> xd->plane[2].subsampling_x);
+
+  predict_superblock(cpi, td,
+#if CONFIG_EXT_INTER
+                     mi_row_ori, mi_col_ori,
+#endif  // CONFIG_EXT_INTER
+                     mi_row_pred, mi_col_pred, bsize_pred, b_sub8x8, block);
+
+  if (output_enabled && !bextend) update_stats(&cpi->common, td, 1);
+}
+
+static void extend_dir(VP10_COMP *cpi, ThreadData *td,
+                       const TileInfo *const tile, int block, BLOCK_SIZE bsize,
+                       BLOCK_SIZE top_bsize, int mi_row, int mi_col,
+                       int mi_row_top, int mi_col_top, int output_enabled,
+                       uint8_t *dst_buf[3], int dst_stride[3], int dir) {
+  // dir: 0-lower, 1-upper, 2-left, 3-right
+  //      4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright
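+  //
+  // The directions map onto the block's neighborhood as follows:
+  //
+  //        5 | 1 | 7
+  //       ---+---+---
+  //        2 |   | 3
+  //       ---+---+---
+  //        4 | 0 | 6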
+  MACROBLOCKD *xd = &td->mb.e_mbd;
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
+  int xss = xd->plane[1].subsampling_x;
+  int yss = xd->plane[1].subsampling_y;
+  int b_sub8x8 = (bsize < BLOCK_8X8) ? 1 : 0;
+
+  BLOCK_SIZE extend_bsize;
+  int unit, mi_row_pred, mi_col_pred;
+
+  if (dir == 0 || dir == 1) {  // lower and upper
+    extend_bsize = (mi_width == 1 || bsize < BLOCK_8X8 || xss < yss)
+                       ? BLOCK_8X8
+                       : BLOCK_16X8;
+    unit = num_8x8_blocks_wide_lookup[extend_bsize];
+    mi_row_pred = mi_row + ((dir == 0) ? mi_height : -1);
+    mi_col_pred = mi_col;
+
+    predict_b_extend(cpi, td, tile, block, mi_row, mi_col, mi_row_pred,
+                     mi_col_pred, mi_row_top, mi_col_top, dst_buf, dst_stride,
+                     top_bsize, extend_bsize, output_enabled, b_sub8x8, 1);
+
+    if (mi_width > unit) {
+      int i;
+      for (i = 0; i < mi_width / unit - 1; i++) {
+        mi_col_pred += unit;
+        predict_b_extend(cpi, td, tile, block, mi_row, mi_col, mi_row_pred,
+                         mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+                         dst_stride, top_bsize, extend_bsize, output_enabled,
+                         b_sub8x8, 1);
+      }
+    }
+  } else if (dir == 2 || dir == 3) {  // left and right
+    extend_bsize = (mi_height == 1 || bsize < BLOCK_8X8 || yss < xss)
+                       ? BLOCK_8X8
+                       : BLOCK_8X16;
+    unit = num_8x8_blocks_high_lookup[extend_bsize];
+    mi_row_pred = mi_row;
+    mi_col_pred = mi_col + ((dir == 3) ? mi_width : -1);
+
+    predict_b_extend(cpi, td, tile, block, mi_row, mi_col, mi_row_pred,
+                     mi_col_pred, mi_row_top, mi_col_top, dst_buf, dst_stride,
+                     top_bsize, extend_bsize, output_enabled, b_sub8x8, 1);
+
+    if (mi_height > unit) {
+      int i;
+      for (i = 0; i < mi_height / unit - 1; i++) {
+        mi_row_pred += unit;
+        predict_b_extend(cpi, td, tile, block, mi_row, mi_col, mi_row_pred,
+                         mi_col_pred, mi_row_top, mi_col_top, dst_buf,
+                         dst_stride, top_bsize, extend_bsize, output_enabled,
+                         b_sub8x8, 1);
+      }
+    }
+  } else {
+    extend_bsize = BLOCK_8X8;
+    mi_row_pred = mi_row + ((dir == 4 || dir == 6) ? mi_height : -1);
+    mi_col_pred = mi_col + ((dir == 6 || dir == 7) ? mi_width : -1);
+
+    predict_b_extend(cpi, td, tile, block, mi_row, mi_col, mi_row_pred,
+                     mi_col_pred, mi_row_top, mi_col_top, dst_buf, dst_stride,
+                     top_bsize, extend_bsize, output_enabled, b_sub8x8, 1);
+  }
+}
+
+static void extend_all(VP10_COMP *cpi, ThreadData *td,
+                       const TileInfo *const tile, int block, BLOCK_SIZE bsize,
+                       BLOCK_SIZE top_bsize, int mi_row, int mi_col,
+                       int mi_row_top, int mi_col_top, int output_enabled,
+                       uint8_t *dst_buf[3], int dst_stride[3]) {
+  assert(block >= 0 && block < 4);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 0);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 1);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 2);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 3);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 4);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 5);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 6);
+  extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+             mi_col_top, output_enabled, dst_buf, dst_stride, 7);
+}
+
+// This function generates prediction for multiple blocks, between which the
+// discontinuity around the boundary is reduced by smoothing masks. The basic
+// smoothing mask is a soft step function along the horz/vert direction. In
+// the more complicated case where a block is split into 4 subblocks, the
+// basic mask is first applied to neighboring subblocks (2 pairs) in the
+// horizontal direction and then to the 2 masked predictions above in the
+// vertical direction. If the block is split over more than one level, the
+// masked prediction at every stage is stored in the dst_buf[] passed down
+// from the higher level.
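+//
+// As an illustrative sketch (the weights here are assumed; the actual mask is
+// built inside vp10_build_masked_inter_predictor_complex), the basic blend
+// along one vertical boundary behaves like:
+//
+//   for (j = 0; j < h; ++j)
+//     for (k = 0; k < w; ++k) {
+//       const int m = step_weight(k);  // soft step, e.g. 64 -> 0 across edge
+//       dst[j * stride + k] = (m * pred0[j * stride0 + k] +
+//                              (64 - m) * pred1[j * stride1 + k] + 32) >> 6;
+//     }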
+static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
+                               const TileInfo *const tile, int mi_row,
+                               int mi_col, int mi_row_top, int mi_col_top,
+                               int output_enabled, BLOCK_SIZE bsize,
+                               BLOCK_SIZE top_bsize, uint8_t *dst_buf[3],
+                               int dst_stride[3], PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+
+  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
+  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
+  const PARTITION_TYPE partition = pc_tree->partitioning;
+  const BLOCK_SIZE subsize = get_subsize(bsize, partition);
+#if CONFIG_EXT_PARTITION_TYPES
+  const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
+#endif
+
+  int i;
+  uint8_t *dst_buf1[3], *dst_buf2[3], *dst_buf3[3];
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]);
+  int dst_stride1[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
+  int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
+  int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
+
+  assert(bsize >= BLOCK_8X8);
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    int len = sizeof(uint16_t);
+    dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
+    dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_TX_SQUARE * len);
+    dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + 2 * MAX_TX_SQUARE * len);
+    dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
+    dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_TX_SQUARE * len);
+    dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_TX_SQUARE * len);
+    dst_buf3[0] = CONVERT_TO_BYTEPTR(tmp_buf3);
+    dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len);
+    dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len);
+  } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    dst_buf1[0] = tmp_buf1;
+    dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE;
+    dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE;
+    dst_buf2[0] = tmp_buf2;
+    dst_buf2[1] = tmp_buf2 + MAX_TX_SQUARE;
+    dst_buf2[2] = tmp_buf2 + 2 * MAX_TX_SQUARE;
+    dst_buf3[0] = tmp_buf3;
+    dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
+    dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  if (output_enabled && bsize < top_bsize)
+    cm->counts.partition[ctx][partition]++;
+
+  for (i = 0; i < MAX_MB_PLANE; i++) {
+    xd->plane[i].dst.buf = dst_buf[i];
+    xd->plane[i].dst.stride = dst_stride[i];
+  }
+
+  switch (partition) {
+    case PARTITION_NONE:
+      assert(bsize < top_bsize);
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                       mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                       bsize, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize, top_bsize, mi_row, mi_col, mi_row_top,
+                 mi_col_top, output_enabled, dst_buf, dst_stride);
+      break;
+    case PARTITION_HORZ:
+      if (bsize == BLOCK_8X8) {
+        // First half
+        predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                         BLOCK_8X8, output_enabled, 1, 0);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride);
+
+        // Second half
+        predict_b_extend(cpi, td, tile, 2, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf1, dst_stride1,
+                         top_bsize, BLOCK_8X8, output_enabled, 1, 1);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 2, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf1,
+                     dst_stride1);
+
+        // Smooth
+        xd->plane[0].dst.buf = dst_buf[0];
+        xd->plane[0].dst.stride = dst_stride[0];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+            0);
+      } else {
+        // First half
+        predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                         subsize, output_enabled, 0, 0);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride);
+        else
+          extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride, 0);
+
+        if (mi_row + hbs < cm->mi_rows) {
+          // Second half
+          predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+                           mi_col, mi_row_top, mi_col_top, dst_buf1,
+                           dst_stride1, top_bsize, subsize, output_enabled, 0,
+                           0);
+          if (bsize < top_bsize)
+            extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs,
+                       mi_col, mi_row_top, mi_col_top, output_enabled, dst_buf1,
+                       dst_stride1);
+          else
+            extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs,
+                       mi_col, mi_row_top, mi_col_top, output_enabled, dst_buf1,
+                       dst_stride1, 1);
+
+          // Smooth
+          for (i = 0; i < MAX_MB_PLANE; i++) {
+            xd->plane[i].dst.buf = dst_buf[i];
+            xd->plane[i].dst.stride = dst_stride[i];
+            vp10_build_masked_inter_predictor_complex(
+                xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
+                mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+                PARTITION_HORZ, i);
+          }
+        }
+      }
+      break;
+    case PARTITION_VERT:
+      if (bsize == BLOCK_8X8) {
+        // First half
+        predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                         BLOCK_8X8, output_enabled, 1, 0);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride);
+
+        // Second half
+        predict_b_extend(cpi, td, tile, 1, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf1, dst_stride1,
+                         top_bsize, BLOCK_8X8, output_enabled, 1, 1);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 1, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf1,
+                     dst_stride1);
+
+        // Smooth
+        xd->plane[0].dst.buf = dst_buf[0];
+        xd->plane[0].dst.stride = dst_stride[0];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+            0);
+      } else {
+        // First half
+        predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                         subsize, output_enabled, 0, 0);
+        if (bsize < top_bsize)
+          extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride);
+        else
+          extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride, 3);
+
+        if (mi_col + hbs < cm->mi_cols) {
+          predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row,
+                           mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+                           dst_stride1, top_bsize, subsize, output_enabled, 0,
+                           0);
+          if (bsize < top_bsize)
+            extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row,
+                       mi_col + hbs, mi_row_top, mi_col_top, output_enabled,
+                       dst_buf1, dst_stride1);
+          else
+            extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row,
+                       mi_col + hbs, mi_row_top, mi_col_top, output_enabled,
+                       dst_buf1, dst_stride1, 2);
+
+          for (i = 0; i < MAX_MB_PLANE; i++) {
+            xd->plane[i].dst.buf = dst_buf[i];
+            xd->plane[i].dst.stride = dst_stride[i];
+            vp10_build_masked_inter_predictor_complex(
+                xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
+                mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+                PARTITION_VERT, i);
+          }
+        }
+      }
+      break;
+    case PARTITION_SPLIT:
+      if (bsize == BLOCK_8X8) {
+        predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                         BLOCK_8X8, output_enabled, 1, 0);
+        predict_b_extend(cpi, td, tile, 1, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf1, dst_stride1,
+                         top_bsize, BLOCK_8X8, output_enabled, 1, 1);
+        predict_b_extend(cpi, td, tile, 2, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf2, dst_stride2,
+                         top_bsize, BLOCK_8X8, output_enabled, 1, 1);
+        predict_b_extend(cpi, td, tile, 3, mi_row, mi_col, mi_row, mi_col,
+                         mi_row_top, mi_col_top, dst_buf3, dst_stride3,
+                         top_bsize, BLOCK_8X8, output_enabled, 1, 1);
+
+        if (bsize < top_bsize) {
+          extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf,
+                     dst_stride);
+          extend_all(cpi, td, tile, 1, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf1,
+                     dst_stride1);
+          extend_all(cpi, td, tile, 2, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                     dst_stride2);
+          extend_all(cpi, td, tile, 3, subsize, top_bsize, mi_row, mi_col,
+                     mi_row_top, mi_col_top, output_enabled, dst_buf3,
+                     dst_stride3);
+        }
+      } else {
+        predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row_top,
+                           mi_col_top, output_enabled, subsize, top_bsize,
+                           dst_buf, dst_stride, pc_tree->split[0]);
+        if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+          predict_sb_complex(cpi, td, tile, mi_row, mi_col + hbs, mi_row_top,
+                             mi_col_top, output_enabled, subsize, top_bsize,
+                             dst_buf1, dst_stride1, pc_tree->split[1]);
+        if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols)
+          predict_sb_complex(cpi, td, tile, mi_row + hbs, mi_col, mi_row_top,
+                             mi_col_top, output_enabled, subsize, top_bsize,
+                             dst_buf2, dst_stride2, pc_tree->split[2]);
+        if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols)
+          predict_sb_complex(cpi, td, tile, mi_row + hbs, mi_col + hbs,
+                             mi_row_top, mi_col_top, output_enabled, subsize,
+                             top_bsize, dst_buf3, dst_stride3,
+                             pc_tree->split[3]);
+      }
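+      // Blend the four quadrants: merge horizontal neighbors across the
+      // vertical boundary within each half first, then the two merged halves
+      // across the horizontal boundary, following the scheme described above.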
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        if (bsize == BLOCK_8X8 && i != 0)
+          continue;  // Skip <4x4 chroma smoothing
+        if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
+          vp10_build_masked_inter_predictor_complex(
+              xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
+              mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+              PARTITION_VERT, i);
+          if (mi_row + hbs < cm->mi_rows) {
+            vp10_build_masked_inter_predictor_complex(
+                xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
+                mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+                PARTITION_VERT, i);
+            vp10_build_masked_inter_predictor_complex(
+                xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
+                mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+                PARTITION_HORZ, i);
+          }
+        } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
+          vp10_build_masked_inter_predictor_complex(
+              xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
+              mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+              PARTITION_HORZ, i);
+        }
+      }
+      break;
+#if CONFIG_EXT_PARTITION_TYPES
+    case PARTITION_HORZ_A:
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                       mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                       bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row,
+                       mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+                       dst_stride1, top_bsize, bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf1, dst_stride1);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+                       mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2,
+                       top_bsize, subsize, output_enabled, 0, 0);
+      if (bsize < top_bsize)
+        extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                   dst_stride2);
+      else
+        extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                   dst_stride2, 1);
+
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf[i];
+        xd->plane[i].dst.stride = dst_stride[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+            i);
+      }
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+            i);
+      }
+
+      break;
+    case PARTITION_VERT_A:
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                       mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                       bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+                       mi_col, mi_row_top, mi_col_top, dst_buf1, dst_stride1,
+                       top_bsize, bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf1, dst_stride1);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row,
+                       mi_col + hbs, mi_row_top, mi_col_top, dst_buf2,
+                       dst_stride2, top_bsize, subsize, output_enabled, 0, 0);
+      if (bsize < top_bsize)
+        extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col + hbs,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                   dst_stride2);
+      else
+        extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col + hbs,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                   dst_stride2, 2);
+
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf[i];
+        xd->plane[i].dst.stride = dst_stride[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+            i);
+      }
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+            i);
+      }
+      break;
+    case PARTITION_HORZ_B:
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                       mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                       subsize, output_enabled, 0, 0);
+      if (bsize < top_bsize)
+        extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride);
+      else
+        extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride,
+                   0);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs,
+                       mi_col, mi_row_top, mi_col_top, dst_buf1, dst_stride1,
+                       top_bsize, bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf1, dst_stride1);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col + hbs,
+                       mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
+                       dst_buf2, dst_stride2, top_bsize, bsize2, output_enabled,
+                       0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs,
+                 mi_col + hbs, mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                 dst_stride2);
+
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf1[i];
+        xd->plane[i].dst.stride = dst_stride1[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
+            mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+            PARTITION_VERT, i);
+      }
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf[i];
+        xd->plane[i].dst.stride = dst_stride[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
+            i);
+      }
+      break;
+    case PARTITION_VERT_B:
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col,
+                       mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize,
+                       subsize, output_enabled, 0, 0);
+      if (bsize < top_bsize)
+        extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride);
+      else
+        extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col,
+                   mi_row_top, mi_col_top, output_enabled, dst_buf, dst_stride,
+                   3);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row,
+                       mi_col + hbs, mi_row_top, mi_col_top, dst_buf1,
+                       dst_stride1, top_bsize, bsize2, output_enabled, 0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs,
+                 mi_row_top, mi_col_top, output_enabled, dst_buf1, dst_stride1);
+
+      predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col + hbs,
+                       mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top,
+                       dst_buf2, dst_stride2, top_bsize, bsize2, output_enabled,
+                       0, 0);
+      extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs,
+                 mi_col + hbs, mi_row_top, mi_col_top, output_enabled, dst_buf2,
+                 dst_stride2);
+
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf1[i];
+        xd->plane[i].dst.stride = dst_stride1[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
+            mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
+            PARTITION_HORZ, i);
+      }
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = dst_buf[i];
+        xd->plane[i].dst.stride = dst_stride[i];
+        vp10_build_masked_inter_predictor_complex(
+            xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
+            mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
+            i);
+      }
+      break;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    default: assert(0);
+  }
+
+#if CONFIG_EXT_PARTITION_TYPES
+  if (bsize < top_bsize)
+    update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition);
+#else
+  if (bsize < top_bsize && (partition != PARTITION_SPLIT || bsize == BLOCK_8X8))
+    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+}
+
+static void rd_supertx_sb(VP10_COMP *cpi, ThreadData *td,
+                          const TileInfo *const tile, int mi_row, int mi_col,
+                          BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist,
+                          TX_TYPE *best_tx, PC_TREE *pc_tree) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate,
+      base_rate = *tmp_rate;
+  int64_t sse, pnsse, sse_uv, this_dist, dist_uv;
+  uint8_t *dst_buf[3];
+  int dst_stride[3];
+  TX_SIZE tx_size;
+  MB_MODE_INFO *mbmi;
+  TX_TYPE tx_type, best_tx_nostx;
+#if CONFIG_EXT_TX
+  int ext_tx_set;
+#endif  // CONFIG_EXT_TX
+  int tmp_rate_tx = 0, skip_tx = 0;
+  int64_t tmp_dist_tx = 0, rd_tx, bestrd_tx = INT64_MAX;
+
+  set_skip_context(xd, mi_row, mi_col);
+  set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
+  update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize, 0, pc_tree);
+  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
+    dst_buf[plane] = xd->plane[plane].dst.buf;
+    dst_stride[plane] = xd->plane[plane].dst.stride;
+  }
+  predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row, mi_col, 0, bsize,
+                     bsize, dst_buf, dst_stride, pc_tree);
+
+  set_offsets_without_segment_id(cpi, tile, x, mi_row, mi_col, bsize);
+  set_segment_id_supertx(cpi, x, mi_row, mi_col, bsize);
+
+  mbmi = &xd->mi[0]->mbmi;
+  best_tx_nostx = mbmi->tx_type;
+
+  *best_tx = DCT_DCT;
+
+  // chroma
+  skippable_uv = 1;
+  rate_uv = 0;
+  dist_uv = 0;
+  sse_uv = 0;
+  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+#if CONFIG_VAR_TX
+    ENTROPY_CONTEXT ctxa[2 * MAX_MIB_SIZE];
+    ENTROPY_CONTEXT ctxl[2 * MAX_MIB_SIZE];
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    int coeff_ctx = 1;
+
+    this_rate = 0;
+    this_dist = 0;
+    pnsse = 0;
+    pnskip = 1;
+
+    tx_size = max_txsize_lookup[bsize];
+    tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
+                                  cm->subsampling_y);
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+    coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
+
+    vp10_subtract_plane(x, bsize, plane);
+    vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0,
+                       get_plane_block_size(bsize, pd), coeff_ctx, &this_rate,
+                       &this_dist, &pnsse, &pnskip);
+#else
+    tx_size = max_txsize_lookup[bsize];
+    tx_size = get_uv_tx_size_impl(tx_size, bsize, cm->subsampling_x,
+                                  cm->subsampling_y);
+    vp10_subtract_plane(x, bsize, plane);
+    vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+                                  &pnsse, INT64_MAX, plane, bsize, tx_size, 0);
+#endif  // CONFIG_VAR_TX
+
+    rate_uv += this_rate;
+    dist_uv += this_dist;
+    sse_uv += pnsse;
+    skippable_uv &= pnskip;
+  }
+
+  // luma
+  tx_size = max_txsize_lookup[bsize];
+  vp10_subtract_plane(x, bsize, 0);
+#if CONFIG_EXT_TX
+  ext_tx_set = get_ext_tx_set(tx_size, bsize, 1);
+#endif  // CONFIG_EXT_TX
+  for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) {
+#if CONFIG_VAR_TX
+    ENTROPY_CONTEXT ctxa[2 * MAX_MIB_SIZE];
+    ENTROPY_CONTEXT ctxl[2 * MAX_MIB_SIZE];
+    const struct macroblockd_plane *const pd = &xd->plane[0];
+    int coeff_ctx = 1;
+#endif  // CONFIG_VAR_TX
+#if CONFIG_EXT_TX
+    if (!ext_tx_used_inter[ext_tx_set][tx_type]) continue;
+#else
+    if (tx_size >= TX_32X32 && tx_type != DCT_DCT) continue;
+#endif  // CONFIG_EXT_TX
+    mbmi->tx_type = tx_type;
+
+#if CONFIG_VAR_TX
+    this_rate = 0;
+    this_dist = 0;
+    pnsse = 0;
+    pnskip = 1;
+
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl);
+    coeff_ctx = combine_entropy_contexts(ctxa[0], ctxl[0]);
+    vp10_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, coeff_ctx,
+                       &this_rate, &this_dist, &pnsse, &pnskip);
+#else
+    vp10_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip,
+                                  &pnsse, INT64_MAX, 0, bsize, tx_size, 0);
+#endif  // CONFIG_VAR_TX
+
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(tx_size, bsize, 1) > 1 &&
+        !xd->lossless[xd->mi[0]->mbmi.segment_id] && this_rate != INT_MAX) {
+      if (ext_tx_set > 0)
+        this_rate +=
+            cpi->inter_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->tx_type];
+    }
+#else
+    if (tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
+        this_rate != INT_MAX) {
+      this_rate += cpi->inter_tx_type_costs[tx_size][mbmi->tx_type];
+    }
+#endif  // CONFIG_EXT_TX
+    *tmp_rate = rate_uv + this_rate;
+    *tmp_dist = dist_uv + this_dist;
+    sse = sse_uv + pnsse;
+    skippable = skippable_uv && pnskip;
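+    // If every plane is skippable, code only the skip flag. Otherwise take
+    // the cheaper of coding the coefficients or forcing skip, which pays the
+    // full SSE as distortion.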
+    if (skippable) {
+      *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      x->skip = 1;
+    } else {
+      if (RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist) <
+          RDCOST(x->rdmult, x->rddiv, 0, sse)) {
+        *tmp_rate += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        x->skip = 0;
+      } else {
+        *tmp_dist = sse;
+        *tmp_rate = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        x->skip = 1;
+      }
+    }
+    *tmp_rate += base_rate;
+    rd_tx = RDCOST(x->rdmult, x->rddiv, *tmp_rate, *tmp_dist);
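+    // Switch away from the current best only for a >1% RD gain; DCT_DCT is
+    // evaluated first and always accepted as the baseline.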
+    if (rd_tx < bestrd_tx * 0.99 || tx_type == DCT_DCT) {
+      *best_tx = tx_type;
+      bestrd_tx = rd_tx;
+      tmp_rate_tx = *tmp_rate;
+      tmp_dist_tx = *tmp_dist;
+      skip_tx = x->skip;
+    }
+  }
+  *tmp_rate = tmp_rate_tx;
+  *tmp_dist = tmp_dist_tx;
+  x->skip = skip_tx;
+#if CONFIG_VAR_TX
+  for (plane = 0; plane < 1; ++plane)
+    memset(x->blk_skip[plane], x->skip,
+           sizeof(uint8_t) * pc_tree->none.num_4x4_blk);
+#endif  // CONFIG_VAR_TX
+  xd->mi[0]->mbmi.tx_type = best_tx_nostx;
+}
+#endif  // CONFIG_SUPERTX
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
new file mode 100644
index 0000000..338cb86
--- /dev/null
+++ b/av1/encoder/encodeframe.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_ENCODEFRAME_H_
+#define VP10_ENCODER_ENCODEFRAME_H_
+
+#include "aom/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct macroblock;
+struct yv12_buffer_config;
+struct VP10_COMP;
+struct ThreadData;
+
+// Constants used in SOURCE_VAR_BASED_PARTITION
+#define VAR_HIST_MAX_BG_VAR 1000
+#define VAR_HIST_FACTOR 10
+#define VAR_HIST_BINS (VAR_HIST_MAX_BG_VAR / VAR_HIST_FACTOR + 1)
+#define VAR_HIST_LARGE_CUT_OFF 75
+#define VAR_HIST_SMALL_CUT_OFF 45
+
+void vp10_setup_src_planes(struct macroblock *x,
+                           const struct yv12_buffer_config *src, int mi_row,
+                           int mi_col);
+
+void vp10_encode_frame(struct VP10_COMP *cpi);
+
+void vp10_init_tile_data(struct VP10_COMP *cpi);
+void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
+                      int tile_row, int tile_col);
+
+void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_ENCODEFRAME_H_
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
new file mode 100644
index 0000000..e72db2d
--- /dev/null
+++ b/av1/encoder/encodemb.c
@@ -0,0 +1,1158 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "aom_dsp/quantize.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/idct.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/scan.h"
+
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/hybrid_fwd_txfm.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/tokenize.h"
+
+void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+  struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
+  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+  const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
+                              p->src.stride, pd->dst.buf, pd->dst.stride,
+                              x->e_mbd.bd);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
+                     pd->dst.buf, pd->dst.stride);
+}
+
+typedef struct vp10_token_state {
+  int rate;
+  int64_t error;
+  int next;
+  int16_t token;
+  tran_low_t qc;
+  tran_low_t dqc;
+} vp10_token_state;
+
+// These numbers are empirically obtained.
+static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
+  { 10, 6 }, { 8, 5 },
+};
+
+#define UPDATE_RD_COST()                             \
+  {                                                  \
+    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
+    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
+  }
+
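+// RDCOST() folds rate and squared error into one Lagrangian cost (roughly
+// ((rate * rdmult) >> 8) + (error << rddiv)), letting the trellis below
+// compare alternative roundings of each coefficient on a single scale.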
+int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                    int ctx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  struct macroblock_plane *const p = &mb->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int ref = is_inter_block(&xd->mi[0]->mbmi);
+  vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
+  unsigned best_index[MAX_TX_SQUARE + 1][2];
+  uint8_t token_cache[MAX_TX_SQUARE];
+  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  const int eob = p->eobs[block];
+  const PLANE_TYPE type = pd->plane_type;
+  const int default_eob = get_tx2d_size(tx_size);
+  const int16_t *const dequant_ptr = pd->dequant;
+  const uint8_t *const band_translate = get_band_translate(tx_size);
+  TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
+  const scan_order *const so =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  const int16_t *const scan = so->scan;
+  const int16_t *const nb = so->neighbors;
+#if CONFIG_AOM_QM
+  int seg_id = xd->mi[0]->mbmi.segment_id;
+  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
+  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
+#endif
+  const int shift = get_tx_scale(xd, tx_type, tx_size);
+#if CONFIG_NEW_QUANT
+  int dq = get_dq_profile_from_ctx(ctx);
+  const dequant_val_type_nuq *dequant_val = pd->dequant_val_nuq[dq];
+#else
+  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
+#endif  // CONFIG_NEW_QUANT
+  int next = eob, sz = 0;
+  const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1;
+  const int64_t rddiv = mb->rddiv;
+  int64_t rd_cost0, rd_cost1;
+  int rate0, rate1;
+  int64_t error0, error1;
+  int16_t t0, t1;
+  int best, band = (eob < default_eob) ? band_translate[eob]
+                                       : band_translate[eob - 1];
+  int pt, i, final_eob;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#else
+  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+#endif
+  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+      mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
+  const uint16_t *band_counts = &band_count_table[tx_size][band];
+  uint16_t band_left = eob - band_cum_count_table[tx_size][band] + 1;
+  int shortcut = 0;
+  int next_shortcut = 0;
+
+  token_costs += band;
+
+  assert((!type && !plane) || (type && plane));
+  assert(eob <= default_eob);
+
+  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+  /* Initialize the sentinel node of the trellis. */
+  tokens[eob][0].rate = 0;
+  tokens[eob][0].error = 0;
+  tokens[eob][0].next = default_eob;
+  tokens[eob][0].token = EOB_TOKEN;
+  tokens[eob][0].qc = 0;
+  tokens[eob][1] = tokens[eob][0];
+
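+  /* Forward pass: seed the trellis with the rate of each coefficient as
+   * currently quantized; the backward pass below evaluates alternative
+   * roundings against these baselines. */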
+  for (i = 0; i < eob; i++) {
+    const int rc = scan[i];
+    tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
+    tokens[i][0].token = t0;
+    token_cache[rc] = vp10_pt_energy_class[t0];
+  }
+
+  for (i = eob; i-- > 0;) {
+    int base_bits, dx;
+    int64_t d2;
+    const int rc = scan[i];
+#if CONFIG_AOM_QM
+    int iwt = iqmatrix[rc];
+#endif
+    int x = qcoeff[rc];
+    next_shortcut = shortcut;
+
+    /* Only add a trellis state for non-zero coefficients. */
+    if (UNLIKELY(x)) {
+      error0 = tokens[next][0].error;
+      error1 = tokens[next][1].error;
+      /* Evaluate the first possibility for this state. */
+      rate0 = tokens[next][0].rate;
+      rate1 = tokens[next][1].rate;
+
+      if (next_shortcut) {
+        /* Consider both possible successor states. */
+        if (next < default_eob) {
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
+          rate1 += (*token_costs)[0][pt][tokens[next][1].token];
+        }
+        UPDATE_RD_COST();
+        /* And pick the best. */
+        best = rd_cost1 < rd_cost0;
+      } else {
+        if (next < default_eob) {
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
+        }
+        best = 0;
+      }
+
+      dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx >>= xd->bd - 8;
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      d2 = (int64_t)dx * dx;
+      tokens[i][0].rate += (best ? rate1 : rate0);
+      tokens[i][0].error = d2 + (best ? error1 : error0);
+      tokens[i][0].next = next;
+      tokens[i][0].qc = x;
+      tokens[i][0].dqc = dqcoeff[rc];
+      best_index[i][0] = best;
+
+      /* Evaluate the second possibility for this state. */
+      rate0 = tokens[next][0].rate;
+      rate1 = tokens[next][1].rate;
+
+      // The threshold of 3 is empirically obtained.
+      if (UNLIKELY(abs(x) > 3)) {
+        shortcut = 0;
+      } else {
+#if CONFIG_NEW_QUANT
+        shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
+                                               dequant_val[band_translate[i]]) >
+                     (abs(coeff[rc]) << shift)) &&
+                    (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
+                                               dequant_val[band_translate[i]]) <
+                     (abs(coeff[rc]) << shift)));
+#else  // CONFIG_NEW_QUANT
+#if CONFIG_AOM_QM
+        if ((abs(x) * dequant_ptr[rc != 0] * iwt >
+             ((abs(coeff[rc]) << shift) << AOM_QM_BITS)) &&
+            (abs(x) * dequant_ptr[rc != 0] * iwt <
+             (((abs(coeff[rc]) << shift) + dequant_ptr[rc != 0])
+              << AOM_QM_BITS)))
+#else
+        if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
+            (abs(x) * dequant_ptr[rc != 0] <
+             (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
+#endif  // CONFIG_AOM_QM
+          shortcut = 1;
+        else
+          shortcut = 0;
+#endif  // CONFIG_NEW_QUANT
+      }
+
+      if (shortcut) {
+        sz = -(x < 0);
+        x -= 2 * sz + 1;
+      } else {
+        tokens[i][1] = tokens[i][0];
+        best_index[i][1] = best_index[i][0];
+        next = i;
+
+        if (UNLIKELY(!(--band_left))) {
+          --band_counts;
+          band_left = *band_counts;
+          --token_costs;
+        }
+        continue;
+      }
+
+      /* Consider both possible successor states. */
+      if (!x) {
+        /* If we reduced this coefficient to zero, check to see if
+         *  we need to move the EOB back here.
+         */
+        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
+        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
+        base_bits = 0;
+      } else {
+        base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
+        t1 = t0;
+      }
+
+      if (next_shortcut) {
+        if (LIKELY(next < default_eob)) {
+          if (t0 != EOB_TOKEN) {
+            token_cache[rc] = vp10_pt_energy_class[t0];
+            pt = get_coef_context(nb, token_cache, i + 1);
+            rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
+          }
+          if (t1 != EOB_TOKEN) {
+            token_cache[rc] = vp10_pt_energy_class[t1];
+            pt = get_coef_context(nb, token_cache, i + 1);
+            rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
+          }
+        }
+
+        UPDATE_RD_COST();
+        /* And pick the best. */
+        best = rd_cost1 < rd_cost0;
+      } else {
+        // The two states in the next stage are identical.
+        if (next < default_eob && t0 != EOB_TOKEN) {
+          token_cache[rc] = vp10_pt_energy_class[t0];
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
+        }
+        best = 0;
+      }
+
+#if CONFIG_NEW_QUANT
+      dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
+                                  dequant_val[band_translate[i]]) -
+           (coeff[rc] << shift);
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx >>= xd->bd - 8;
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#else   // CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
+      } else {
+        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+      }
+#else
+      dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_NEW_QUANT
+      d2 = (int64_t)dx * dx;
+
+      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+      tokens[i][1].error = d2 + (best ? error1 : error0);
+      tokens[i][1].next = next;
+      tokens[i][1].token = best ? t1 : t0;
+      tokens[i][1].qc = x;
+
+      if (x) {
+#if CONFIG_NEW_QUANT
+        tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
+            abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
+        tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
+                                 : tokens[i][1].dqc;
+        if (sz) tokens[i][1].dqc = -tokens[i][1].dqc;
+#else
+        tran_low_t offset = dq_step[rc != 0];
+        // The 32x32 transform coefficient uses half the quantization step
+        // size. Account for the rounding difference in the dequantized
+        // coefficient value when the quantization index is dropped from an
+        // even number to an odd number.
+        if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);
+
+        if (sz == 0)
+          tokens[i][1].dqc = dqcoeff[rc] - offset;
+        else
+          tokens[i][1].dqc = dqcoeff[rc] + offset;
+#endif  // CONFIG_NEW_QUANT
+      } else {
+        tokens[i][1].dqc = 0;
+      }
+
+      best_index[i][1] = best;
+      /* Finally, make this the new head of the trellis. */
+      next = i;
+    } else {
+      /* There's no choice to make for a zero coefficient, so we don't
+       *  add a new trellis node, but we do need to update the costs.
+       */
+      t0 = tokens[next][0].token;
+      t1 = tokens[next][1].token;
+      pt = get_coef_context(nb, token_cache, i + 1);
+      /* Update the cost of each path if we're past the EOB token. */
+      if (t0 != EOB_TOKEN) {
+        tokens[next][0].rate += (*token_costs)[1][pt][t0];
+        tokens[next][0].token = ZERO_TOKEN;
+      }
+      if (t1 != EOB_TOKEN) {
+        tokens[next][1].rate += (*token_costs)[1][pt][t1];
+        tokens[next][1].token = ZERO_TOKEN;
+      }
+      best_index[i][0] = best_index[i][1] = 0;
+      shortcut = (tokens[next][0].rate != tokens[next][1].rate);
+      /* Don't update next, because we didn't add a new node. */
+    }
+
+    if (UNLIKELY(!(--band_left))) {
+      --band_counts;
+      band_left = *band_counts;
+      --token_costs;
+    }
+  }
+
+  /* Now pick the best path through the whole trellis. */
+  rate0 = tokens[next][0].rate;
+  rate1 = tokens[next][1].rate;
+  error0 = tokens[next][0].error;
+  error1 = tokens[next][1].error;
+  t0 = tokens[next][0].token;
+  t1 = tokens[next][1].token;
+  rate0 += (*token_costs)[0][ctx][t0];
+  rate1 += (*token_costs)[0][ctx][t1];
+  UPDATE_RD_COST();
+  best = rd_cost1 < rd_cost0;
+
+  final_eob = -1;
+
+  for (i = next; i < eob; i = next) {
+    const int x = tokens[i][best].qc;
+    const int rc = scan[i];
+#if CONFIG_AOM_QM
+    const int iwt = iqmatrix[rc];
+    const int dequant =
+        (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >> AOM_QM_BITS;
+#endif
+
+    if (x) final_eob = i;
+    qcoeff[rc] = x;
+    dqcoeff[rc] = tokens[i][best].dqc;
+
+    next = tokens[i][best].next;
+    best = best_index[i][best];
+  }
+  final_eob++;
+
+  mb->plane[plane].eobs[block] = final_eob;
+  assert(final_eob <= default_eob);
+  return final_eob;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef enum QUANT_FUNC {
+  QUANT_FUNC_LOWBD = 0,
+  QUANT_FUNC_HIGHBD = 1,
+  QUANT_FUNC_LAST = 2
+} QUANT_FUNC;
+
+static VP10_QUANT_FACADE
+    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
+      { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
+      { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
+      { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
+      { NULL, NULL }
+    };
+
+#else
+typedef enum QUANT_FUNC {
+  QUANT_FUNC_LOWBD = 0,
+  QUANT_FUNC_LAST = 1
+} QUANT_FUNC;
+
+static VP10_QUANT_FACADE
+    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
+      { vp10_quantize_fp_facade },
+      { vp10_quantize_b_facade },
+      { vp10_quantize_dc_facade },
+      { NULL }
+    };
+#endif
+
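+// Indexed by VP10_XFORM_QUANT, parallel to quant_func_list above: only the
+// DC-only quantizer can use the reduced FWD_TXFM_OPT_DC forward transform.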
+static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
+  FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
+};
+
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                      VP10_XFORM_QUANT xform_quant_idx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+#if CONFIG_AOM_QM
+  int seg_id = xd->mi[0]->mbmi.segment_id;
+  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
+  const qm_val_t *qmatrix = pd->seg_qmatrix[seg_id][is_intra][tx_size];
+  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
+#endif
+  const int16_t *src_diff;
+  const int tx2d_size = get_tx2d_size(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+  QUANT_PARAM qparam;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[xform_quant_idx];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+  qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+      if (LIKELY(!x->skip_block)) {
+        quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
+            coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
+#if CONFIG_AOM_QM
+            ,
+            qmatrix, iqmatrix
+#endif  // CONFIG_AOM_QM
+            );
+      } else {
+        vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+      }
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+    if (LIKELY(!x->skip_block)) {
+      quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
+          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
+#if CONFIG_AOM_QM
+          ,
+          qmatrix, iqmatrix
+#endif  // CONFIG_AOM_QM
+          );
+    } else {
+      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+    }
+  }
+}
+
+#if CONFIG_NEW_QUANT
+void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                          int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  int dq = get_dq_profile_from_ctx(ctx);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  const uint8_t *band = get_band_translate(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_32x32_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+          p->quant_shift, pd->dequant,
+          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    } else {
+      highbd_quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant, p->quant_shift, pd->dequant,
+                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                          qcoeff, dqcoeff, eob, scan_order->scan, band);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+                       p->quant_shift, pd->dequant,
+                       (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                       (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                       qcoeff, dqcoeff, eob, scan_order->scan, band);
+  } else {
+    quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+                 p->quant_shift, pd->dequant,
+                 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+                 dqcoeff, eob, scan_order->scan, band);
+  }
+}
+
+void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  int dq = get_dq_profile_from_ctx(ctx);
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  const uint8_t *band = get_band_translate(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_32x32_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    } else {
+      highbd_quantize_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant_fp, pd->dequant,
+                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                          qcoeff, dqcoeff, eob, scan_order->scan, band);
+  } else {
+    quantize_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+                    pd->dequant,
+                    (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                    (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                    qcoeff, dqcoeff, eob, scan_order->scan, band);
+  }
+}
+
+void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  int dq = get_dq_profile_from_ctx(ctx);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_dc_32x32_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
+          p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+          pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+    } else {
+      highbd_quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                             p->quant[0], p->quant_shift[0], pd->dequant[0],
+                             p->cuml_bins_nuq[dq][0],
+                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_dc_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant[0], p->quant_shift[0], pd->dequant[0],
+                          p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+                          qcoeff, dqcoeff, eob);
+  } else {
+    quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
+                    p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+                    pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  }
+}
+
+void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
+                                int blk_row, int blk_col,
+                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                                int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  int dq = get_dq_profile_from_ctx(ctx);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_dc_32x32_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
+          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+          qcoeff, dqcoeff, eob);
+    } else {
+      highbd_quantize_dc_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
+          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+          qcoeff, dqcoeff, eob);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_dc_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                             p->quant_fp[0], pd->dequant[0],
+                             p->cuml_bins_nuq[dq][0],
+                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  } else {
+    quantize_dc_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                       p->quant_fp[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+                       pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  }
+}
+#endif  // CONFIG_NEW_QUANT
+
+static void encode_block(int plane, int block, int blk_row, int blk_col,
+                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int ctx;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint8_t *dst;
+  ENTROPY_CONTEXT *a, *l;
+  INV_TXFM_PARAM inv_txfm_param;
+#if CONFIG_VAR_TX
+  int i;
+  const int bwl = b_width_log2_lookup[plane_bsize];
+#endif
+  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
+  a = &args->ta[blk_col];
+  l = &args->tl[blk_row];
+#if CONFIG_VAR_TX
+  ctx = get_entropy_context(tx_size, a, l);
+#else
+  ctx = combine_entropy_contexts(*a, *l);
+#endif
+
+#if CONFIG_VAR_TX
+  // Assert not magic number (uninitialised).
+  assert(x->blk_skip[plane][(blk_row << bwl) + blk_col] != 234);
+
+  if (x->blk_skip[plane][(blk_row << bwl) + blk_col] == 0) {
+#else
+  {
+#endif
+#if CONFIG_NEW_QUANT
+    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, ctx);
+#else
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+  }
+#if CONFIG_VAR_TX
+  else {
+    p->eobs[block] = 0;
+  }
+#endif
+
+  if (p->eobs[block]) {
+    *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+  } else {
+    *a = *l = p->eobs[block] > 0;
+  }
+
+#if CONFIG_VAR_TX
+  for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i) {
+    a[i] = a[0];
+  }
+  for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i) {
+    l[i] = l[0];
+  }
+#endif
+
+  if (p->eobs[block]) *(args->skip) = 0;
+
+  if (p->eobs[block] == 0) return;
+
+  // inverse transform parameters
+  inv_txfm_param.tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
+  inv_txfm_param.tx_size = tx_size;
+  inv_txfm_param.eob = p->eobs[block];
+  inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    inv_txfm_param.bd = xd->bd;
+    highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
+}
+
+#if CONFIG_VAR_TX
+static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
+                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                               void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
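+  // Locate this transform block in mode-info units, accounting for chroma
+  // subsampling.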
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
+  TX_SIZE plane_tx_size;
+
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
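+  // Clamp the block counts for blocks that straddle the right/bottom frame
+  // edge.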
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  plane_tx_size = plane ? get_uv_tx_size_impl(
+                              mbmi->inter_tx_size[tx_row][tx_col], bsize, 0, 0)
+                        : mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (tx_size == plane_tx_size) {
+    encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
+  } else {
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+
+    assert(bsl > 0);
+    --bsl;
+
+#if CONFIG_EXT_TX
+    assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
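+    // Split into four quadrants and recurse at the next smaller transform
+    // size.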
+    for (i = 0; i < 4; ++i) {
+      const int offsetr = blk_row + ((i >> 1) << bsl);
+      const int offsetc = blk_col + ((i & 0x01) << bsl);
+      int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      encode_block_inter(plane, block + i * step, offsetr, offsetc, plane_bsize,
+                         tx_size - 1, arg);
+    }
+  }
+}
+#endif
+
+static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
+                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                               void *arg) {
+  MACROBLOCK *const x = (MACROBLOCK *)arg;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint8_t *dst;
+#if CONFIG_NEW_QUANT
+  int ctx;
+#endif  // CONFIG_NEW_QUANT
+  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
+
+#if CONFIG_NEW_QUANT
+  ctx = 0;
+  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                          tx_size, ctx);
+#else
+  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                   VP10_XFORM_QUANT_B);
+#endif  // CONFIG_NEW_QUANT
+
+  if (p->eobs[block] > 0) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
+      } else {
+        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
+      }
+      return;
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+      vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+    } else {
+      vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+    }
+  }
+}
+
+void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  vp10_subtract_plane(x, bsize, 0);
+  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+                                          encode_block_pass1, x);
+}
+
+void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct optimize_ctx ctx;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+  int plane;
+
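+  // Assume the block is skipped; encode_block() clears this flag as soon as
+  // any transform block produces a nonzero coefficient.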
+  mbmi->skip = 1;
+
+  if (x->skip) return;
+
+  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+#if CONFIG_VAR_TX
+    // TODO(jingning): Clean this up.
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+    const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+    const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+    const int bh = num_4x4_blocks_wide_lookup[txb_size];
+    int idx, idy;
+    int block = 0;
+    int step = num_4x4_blocks_txsize_lookup[max_tx_size];
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+#endif
+    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
+#else
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+#endif
+    vp10_subtract_plane(x, bsize, plane);
+    arg.ta = ctx.ta[plane];
+    arg.tl = ctx.tl[plane];
+
+#if CONFIG_VAR_TX
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    if (tx_size >= TX_SIZES) {
+      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                              &arg);
+    } else {
+#endif
+      for (idy = 0; idy < mi_height; idy += bh) {
+        for (idx = 0; idx < mi_width; idx += bh) {
+          encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
+                             &arg);
+          block += step;
+        }
+      }
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    }
+#endif
+#else
+    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                            &arg);
+#endif
+  }
+}
+
+#if CONFIG_SUPERTX
+void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct optimize_ctx ctx;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+  int plane;
+
+  mbmi->skip = 1;
+  if (x->skip) return;
+
+  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+#if CONFIG_VAR_TX
+    const TX_SIZE tx_size = TX_4X4;
+#else
+    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+#endif
+    vp10_subtract_plane(x, bsize, plane);
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+    arg.ta = ctx.ta[plane];
+    arg.tl = ctx.tl[plane];
+    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                            &arg);
+  }
+}
+#endif  // CONFIG_SUPERTX
+
+void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                             void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  const TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  PREDICTION_MODE mode;
+  const int bwl = b_width_log2_lookup[plane_bsize];
+  const int bhl = b_height_log2_lookup[plane_bsize];
+  const int diff_stride = 4 * (1 << bwl);
+  uint8_t *src, *dst;
+  int16_t *src_diff;
+  uint16_t *eob = &p->eobs[block];
+  const int src_stride = p->src.stride;
+  const int dst_stride = pd->dst.stride;
+  const int tx1d_width = num_4x4_blocks_wide_txsize_lookup[tx_size] << 2;
+  const int tx1d_height = num_4x4_blocks_high_txsize_lookup[tx_size] << 2;
+  ENTROPY_CONTEXT *a = NULL, *l = NULL;
+  int ctx;
+
+  INV_TXFM_PARAM inv_txfm_param;
+
+  assert(tx1d_width == tx1d_height);
+
+  dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
+  src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+  mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
+  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+                           dst_stride, blk_col, blk_row, plane);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
+                              src, src_stride, dst, dst_stride, xd->bd);
+  } else {
+    vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+                       src_stride, dst, dst_stride);
+  }
+#else
+  vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+                     src_stride, dst, dst_stride);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  a = &args->ta[blk_col];
+  l = &args->tl[blk_row];
+  ctx = combine_entropy_contexts(*a, *l);
+
+  if (args->enable_optimize_b) {
+#if CONFIG_NEW_QUANT
+    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, ctx);
+#else   // CONFIG_NEW_QUANT
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+    if (p->eobs[block]) {
+      *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+    } else {
+      *a = *l = 0;
+    }
+  } else {
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_B);
+    *a = *l = p->eobs[block] > 0;
+  }
+
+  if (*eob) {
+    // inverse transform
+    inv_txfm_param.tx_type = tx_type;
+    inv_txfm_param.tx_size = tx_size;
+    inv_txfm_param.eob = *eob;
+    inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
+#if CONFIG_VP9_HIGHBITDEPTH
+    inv_txfm_param.bd = xd->bd;
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+    } else {
+      inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+    }
+#else
+    inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    *(args->skip) = 0;
+  }
+}
+
+void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+                                   int enable_optimize_b) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
+  ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
+
+  struct encode_b_args arg = { x,  NULL, &xd->mi[0]->mbmi.skip,
+                               ta, tl,   enable_optimize_b };
+  if (enable_optimize_b) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const TX_SIZE tx_size =
+        plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
+  }
+  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
+                                          vp10_encode_block_intra, &arg);
+}
diff --git a/av1/encoder/encodemb.c.orig b/av1/encoder/encodemb.c.orig
new file mode 100644
index 0000000..4c94032
--- /dev/null
+++ b/av1/encoder/encodemb.c.orig
@@ -0,0 +1,1158 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "aom_dsp/quantize.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/idct.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/scan.h"
+
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/hybrid_fwd_txfm.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/tokenize.h"
+
+void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+  struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
+  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+  const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
+                              p->src.stride, pd->dst.buf, pd->dst.stride,
+                              x->e_mbd.bd);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
+                     pd->dst.buf, pd->dst.stride);
+}
+
+typedef struct vp10_token_state {
+  int rate;
+  int64_t error;
+  int next;
+  int16_t token;
+  tran_low_t qc;
+  tran_low_t dqc;
+} vp10_token_state;
+
+// These numbers are empirically obtained.
+static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
+  { 10, 6 }, { 8, 5 },
+};
+
+#define UPDATE_RD_COST()                             \
+  {                                                  \
+    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
+    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
+  }
+
+int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                    int ctx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  struct macroblock_plane *const p = &mb->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int ref = is_inter_block(&xd->mi[0]->mbmi);
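+  /* Two candidate states per scan position: keep the quantized value as-is,
+   * or reduce its magnitude by one. */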
+  vp10_token_state tokens[MAX_TX_SQUARE + 1][2];
+  unsigned best_index[MAX_TX_SQUARE + 1][2];
+  uint8_t token_cache[MAX_TX_SQUARE];
+  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  const int eob = p->eobs[block];
+  const PLANE_TYPE type = pd->plane_type;
+  const int default_eob = get_tx2d_size(tx_size);
+  const int16_t *const dequant_ptr = pd->dequant;
+  const uint8_t *const band_translate = get_band_translate(tx_size);
+  TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
+  const scan_order *const so =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  const int16_t *const scan = so->scan;
+  const int16_t *const nb = so->neighbors;
+  const int shift = get_tx_scale(xd, tx_type, tx_size);
+#if CONFIG_AOM_QM
+  int seg_id = xd->mi[0]->mbmi.segment_id;
+  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
+  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
+#endif
+#if CONFIG_NEW_QUANT
+  int dq = get_dq_profile_from_ctx(ctx);
+  const dequant_val_type_nuq *dequant_val = pd->dequant_val_nuq[dq];
+#else
+  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
+#endif  // CONFIG_NEW_QUANT
+  int next = eob, sz = 0;
+  const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1;
+  const int64_t rddiv = mb->rddiv;
+  int64_t rd_cost0, rd_cost1;
+  int rate0, rate1;
+  int64_t error0, error1;
+  int16_t t0, t1;
+  int best, band = (eob < default_eob) ? band_translate[eob]
+                                       : band_translate[eob - 1];
+  int pt, i, final_eob;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#else
+  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+#endif
+  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+      mb->token_costs[txsize_sqr_map[tx_size]][type][ref];
+  const uint16_t *band_counts = &band_count_table[tx_size][band];
+  uint16_t band_left = eob - band_cum_count_table[tx_size][band] + 1;
+  int shortcut = 0;
+  int next_shortcut = 0;
+
+  token_costs += band;
+
+  assert((!type && !plane) || (type && plane));
+  assert(eob <= default_eob);
+
+  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
+  /* Initialize the sentinel node of the trellis. */
+  tokens[eob][0].rate = 0;
+  tokens[eob][0].error = 0;
+  tokens[eob][0].next = default_eob;
+  tokens[eob][0].token = EOB_TOKEN;
+  tokens[eob][0].qc = 0;
+  tokens[eob][1] = tokens[eob][0];
+
+  for (i = 0; i < eob; i++) {
+    const int rc = scan[i];
+    tokens[i][0].rate = vp10_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
+    tokens[i][0].token = t0;
+    token_cache[rc] = vp10_pt_energy_class[t0];
+  }
+
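+  /* Walk the scan order backwards from the EOB toward DC, adding trellis
+   * states for each nonzero coefficient. */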
+  for (i = eob; i-- > 0;) {
+    int base_bits, dx;
+    int64_t d2;
+    const int rc = scan[i];
+#if CONFIG_AOM_QM
+    int iwt = iqmatrix[rc];
+#endif
+    int x = qcoeff[rc];
+    next_shortcut = shortcut;
+
+    /* Only add a trellis state for non-zero coefficients. */
+    if (UNLIKELY(x)) {
+      error0 = tokens[next][0].error;
+      error1 = tokens[next][1].error;
+      /* Evaluate the first possibility for this state. */
+      rate0 = tokens[next][0].rate;
+      rate1 = tokens[next][1].rate;
+
+      if (next_shortcut) {
+        /* Consider both possible successor states. */
+        if (next < default_eob) {
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
+          rate1 += (*token_costs)[0][pt][tokens[next][1].token];
+        }
+        UPDATE_RD_COST();
+        /* And pick the best. */
+        best = rd_cost1 < rd_cost0;
+      } else {
+        if (next < default_eob) {
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
+        }
+        best = 0;
+      }
+
+      dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx >>= xd->bd - 8;
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      d2 = (int64_t)dx * dx;
+      tokens[i][0].rate += (best ? rate1 : rate0);
+      tokens[i][0].error = d2 + (best ? error1 : error0);
+      tokens[i][0].next = next;
+      tokens[i][0].qc = x;
+      tokens[i][0].dqc = dqcoeff[rc];
+      best_index[i][0] = best;
+
+      /* Evaluate the second possibility for this state. */
+      rate0 = tokens[next][0].rate;
+      rate1 = tokens[next][1].rate;
+
+      // The threshold of 3 is empirically obtained.
+      if (UNLIKELY(abs(x) > 3)) {
+        shortcut = 0;
+      } else {
+#if CONFIG_NEW_QUANT
+        shortcut = ((vp10_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
+                                               dequant_val[band_translate[i]]) >
+                     (abs(coeff[rc]) << shift)) &&
+                    (vp10_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
+                                               dequant_val[band_translate[i]]) <
+                     (abs(coeff[rc]) << shift)));
+#else   // CONFIG_NEW_QUANT
+#if CONFIG_AOM_QM
+        if ((abs(x) * dequant_ptr[rc != 0] * iwt >
+             ((abs(coeff[rc]) << shift) << AOM_QM_BITS)) &&
+            (abs(x) * dequant_ptr[rc != 0] * iwt <
+             (((abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]) << AOM_QM_BITS)))
+#else
+        if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
+            (abs(x) * dequant_ptr[rc != 0] <
+             (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
+#endif  // CONFIG_AOM_QM
+          shortcut = 1;
+        else
+          shortcut = 0;
+#endif  // CONFIG_NEW_QUANT
+      }
+
+      if (shortcut) {
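+        /* Step |x| one toward zero: sz is -1 for negative x and 0 otherwise. */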
+        sz = -(x < 0);
+        x -= 2 * sz + 1;
+      } else {
+        tokens[i][1] = tokens[i][0];
+        best_index[i][1] = best_index[i][0];
+        next = i;
+
+        if (UNLIKELY(!(--band_left))) {
+          --band_counts;
+          band_left = *band_counts;
+          --token_costs;
+        }
+        continue;
+      }
+
+      /* Consider both possible successor states. */
+      if (!x) {
+        /* If we reduced this coefficient to zero, check to see if
+         *  we need to move the EOB back here.
+         */
+        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
+        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
+        base_bits = 0;
+      } else {
+        base_bits = vp10_get_token_cost(x, &t0, cat6_high_cost);
+        t1 = t0;
+      }
+
+      if (next_shortcut) {
+        if (LIKELY(next < default_eob)) {
+          if (t0 != EOB_TOKEN) {
+            token_cache[rc] = vp10_pt_energy_class[t0];
+            pt = get_coef_context(nb, token_cache, i + 1);
+            rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
+          }
+          if (t1 != EOB_TOKEN) {
+            token_cache[rc] = vp10_pt_energy_class[t1];
+            pt = get_coef_context(nb, token_cache, i + 1);
+            rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
+          }
+        }
+
+        UPDATE_RD_COST();
+        /* And pick the best. */
+        best = rd_cost1 < rd_cost0;
+      } else {
+        // The two states in next stage are identical.
+        if (next < default_eob && t0 != EOB_TOKEN) {
+          token_cache[rc] = vp10_pt_energy_class[t0];
+          pt = get_coef_context(nb, token_cache, i + 1);
+          rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
+        }
+        best = 0;
+      }
+
+#if CONFIG_NEW_QUANT
+      dx = vp10_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
+                                  dequant_val[band_translate[i]]) -
+           (coeff[rc] << shift);
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx >>= xd->bd - 8;
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#else   // CONFIG_NEW_QUANT
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
+      } else {
+        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+      }
+#else
+      dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_NEW_QUANT
+      d2 = (int64_t)dx * dx;
+
+      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
+      tokens[i][1].error = d2 + (best ? error1 : error0);
+      tokens[i][1].next = next;
+      tokens[i][1].token = best ? t1 : t0;
+      tokens[i][1].qc = x;
+
+      if (x) {
+#if CONFIG_NEW_QUANT
+        tokens[i][1].dqc = vp10_dequant_abscoeff_nuq(
+            abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
+        tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
+                                 : tokens[i][1].dqc;
+        if (sz) tokens[i][1].dqc = -tokens[i][1].dqc;
+#else
+        tran_low_t offset = dq_step[rc != 0];
+        // The 32x32 transform coefficient uses half quantization step size.
+        // Account for the rounding difference in the dequantized coefficient
+        // value when the quantization index is dropped from an even number
+        // to an odd number.
+        if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);
+
+        if (sz == 0)
+          tokens[i][1].dqc = dqcoeff[rc] - offset;
+        else
+          tokens[i][1].dqc = dqcoeff[rc] + offset;
+#endif  // CONFIG_NEW_QUANT
+      } else {
+        tokens[i][1].dqc = 0;
+      }
+
+      best_index[i][1] = best;
+      /* Finally, make this the new head of the trellis. */
+      next = i;
+    } else {
+      /* There's no choice to make for a zero coefficient, so we don't
+       *  add a new trellis node, but we do need to update the costs.
+       */
+      t0 = tokens[next][0].token;
+      t1 = tokens[next][1].token;
+      pt = get_coef_context(nb, token_cache, i + 1);
+      /* Update the cost of each path if we're past the EOB token. */
+      if (t0 != EOB_TOKEN) {
+        tokens[next][0].rate += (*token_costs)[1][pt][t0];
+        tokens[next][0].token = ZERO_TOKEN;
+      }
+      if (t1 != EOB_TOKEN) {
+        tokens[next][1].rate += (*token_costs)[1][pt][t1];
+        tokens[next][1].token = ZERO_TOKEN;
+      }
+      best_index[i][0] = best_index[i][1] = 0;
+      shortcut = (tokens[next][0].rate != tokens[next][1].rate);
+      /* Don't update next, because we didn't add a new node. */
+    }
+
+    if (UNLIKELY(!(--band_left))) {
+      --band_counts;
+      band_left = *band_counts;
+      --token_costs;
+    }
+  }
+
+  /* Now pick the best path through the whole trellis. */
+  rate0 = tokens[next][0].rate;
+  rate1 = tokens[next][1].rate;
+  error0 = tokens[next][0].error;
+  error1 = tokens[next][1].error;
+  t0 = tokens[next][0].token;
+  t1 = tokens[next][1].token;
+  rate0 += (*token_costs)[0][ctx][t0];
+  rate1 += (*token_costs)[0][ctx][t1];
+  UPDATE_RD_COST();
+  best = rd_cost1 < rd_cost0;
+
+  final_eob = -1;
+
+  for (i = next; i < eob; i = next) {
+    const int x = tokens[i][best].qc;
+    const int rc = scan[i];
+#if CONFIG_AOM_QM
+    const int iwt = iqmatrix[rc];
+    const int dequant =
+        (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >> AOM_QM_BITS;
+#endif
+
+    if (x) final_eob = i;
+    qcoeff[rc] = x;
+    dqcoeff[rc] = tokens[i][best].dqc;
+
+    next = tokens[i][best].next;
+    best = best_index[i][best];
+  }
+  final_eob++;
+
+  mb->plane[plane].eobs[block] = final_eob;
+  assert(final_eob <= default_eob);
+  return final_eob;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef enum QUANT_FUNC {
+  QUANT_FUNC_LOWBD = 0,
+  QUANT_FUNC_HIGHBD = 1,
+  QUANT_FUNC_LAST = 2
+} QUANT_FUNC;
+
+static VP10_QUANT_FACADE
+    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
+      { vp10_quantize_fp_facade, vp10_highbd_quantize_fp_facade },
+      { vp10_quantize_b_facade, vp10_highbd_quantize_b_facade },
+      { vp10_quantize_dc_facade, vp10_highbd_quantize_dc_facade },
+      { NULL, NULL }
+    };
+
+#else
+typedef enum QUANT_FUNC {
+  QUANT_FUNC_LOWBD = 0,
+  QUANT_FUNC_LAST = 1
+} QUANT_FUNC;
+
+static VP10_QUANT_FACADE
+    quant_func_list[VP10_XFORM_QUANT_LAST][QUANT_FUNC_LAST] = {
+      { vp10_quantize_fp_facade },
+      { vp10_quantize_b_facade },
+      { vp10_quantize_dc_facade },
+      { NULL }
+    };
+#endif
+
+static FWD_TXFM_OPT fwd_txfm_opt_list[VP10_XFORM_QUANT_LAST] = {
+  FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
+};
+
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                      VP10_XFORM_QUANT xform_quant_idx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+#if CONFIG_AOM_QM
+  int seg_id = xd->mi[0]->mbmi.segment_id;
+  int is_intra = !is_inter_block(&xd->mi[0]->mbmi);
+  const qm_val_t *qmatrix = pd->seg_qmatrix[seg_id][is_intra][tx_size];
+  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][tx_size];
+#endif
+  const int16_t *src_diff;
+  const int tx2d_size = get_tx2d_size(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+  QUANT_PARAM qparam;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[xform_quant_idx];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+  qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+      if (LIKELY(!x->skip_block)) {
+        quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
+            coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
+#if CONFIG_AOM_QM
+            ,
+            qmatrix, iqmatrix
+#endif  // CONFIG_AOM_QM
+            );
+      } else {
+        vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+      }
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (xform_quant_idx != VP10_XFORM_QUANT_SKIP_QUANT) {
+    if (LIKELY(!x->skip_block)) {
+      quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
+          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
+#if CONFIG_AOM_QM
+          ,
+          qmatrix, iqmatrix
+#endif  // CONFIG_AOM_QM
+          );
+    } else {
+      vp10_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
+    }
+  }
+}
+
+#if CONFIG_NEW_QUANT
+void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                          int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  int dq = get_dq_profile_from_ctx(ctx);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  const uint8_t *band = get_band_translate(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_32x32_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+          p->quant_shift, pd->dequant,
+          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    } else {
+      highbd_quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant, p->quant_shift, pd->dequant,
+                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                          qcoeff, dqcoeff, eob, scan_order->scan, band);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+                       p->quant_shift, pd->dequant,
+                       (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                       (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                       qcoeff, dqcoeff, eob, scan_order->scan, band);
+  } else {
+    quantize_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant,
+                 p->quant_shift, pd->dequant,
+                 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+                 dqcoeff, eob, scan_order->scan, band);
+  }
+}
+
+void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  int dq = get_dq_profile_from_ctx(ctx);
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  const uint8_t *band = get_band_translate(tx_size);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_FP];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_32x32_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    } else {
+      highbd_quantize_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+          pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
+          dqcoeff, eob, scan_order->scan, band);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant_fp, pd->dequant,
+                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                          qcoeff, dqcoeff, eob, scan_order->scan, band);
+  } else {
+    quantize_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp,
+                    pd->dequant,
+                    (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
+                    (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
+                    qcoeff, dqcoeff, eob, scan_order->scan, band);
+  }
+}
+
+void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  int dq = get_dq_profile_from_ctx(ctx);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_dc_32x32_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
+          p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+          pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+    } else {
+      highbd_quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                             p->quant[0], p->quant_shift[0], pd->dequant[0],
+                             p->cuml_bins_nuq[dq][0],
+                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_dc_32x32_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                          p->quant[0], p->quant_shift[0], pd->dequant[0],
+                          p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+                          qcoeff, dqcoeff, eob);
+  } else {
+    quantize_dc_nuq(coeff, get_tx2d_size(tx_size), x->skip_block, p->quant[0],
+                    p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+                    pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  }
+}
+
+void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
+                                int blk_row, int blk_col,
+                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                                int ctx) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint16_t *const eob = &p->eobs[block];
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *src_diff;
+  int dq = get_dq_profile_from_ctx(ctx);
+
+  FWD_TXFM_PARAM fwd_txfm_param;
+
+  fwd_txfm_param.tx_type = tx_type;
+  fwd_txfm_param.tx_size = tx_size;
+  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[VP10_XFORM_QUANT_DC];
+  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
+  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+// TODO(sarahparker): add all of these new-quant quantize functions to
+// quant_func_list; for now this is just to get the experiment working.
+#if CONFIG_VP9_HIGHBITDEPTH
+  fwd_txfm_param.bd = xd->bd;
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+    if (tx_size == TX_32X32) {
+      highbd_quantize_dc_32x32_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
+          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+          qcoeff, dqcoeff, eob);
+    } else {
+      highbd_quantize_dc_fp_nuq(
+          coeff, get_tx2d_size(tx_size), x->skip_block, p->quant_fp[0],
+          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
+          qcoeff, dqcoeff, eob);
+    }
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
+  if (tx_size == TX_32X32) {
+    quantize_dc_32x32_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                             p->quant_fp[0], pd->dequant[0],
+                             p->cuml_bins_nuq[dq][0],
+                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  } else {
+    quantize_dc_fp_nuq(coeff, get_tx2d_size(tx_size), x->skip_block,
+                       p->quant_fp[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
+                       pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
+  }
+}
+#endif  // CONFIG_NEW_QUANT
+
+static void encode_block(int plane, int block, int blk_row, int blk_col,
+                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int ctx;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint8_t *dst;
+  ENTROPY_CONTEXT *a, *l;
+  INV_TXFM_PARAM inv_txfm_param;
+#if CONFIG_VAR_TX
+  int i;
+  const int bwl = b_width_log2_lookup[plane_bsize];
+#endif
+  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
+  a = &args->ta[blk_col];
+  l = &args->tl[blk_row];
+#if CONFIG_VAR_TX
+  ctx = get_entropy_context(tx_size, a, l);
+#else
+  ctx = combine_entropy_contexts(*a, *l);
+#endif
+
+#if CONFIG_VAR_TX
+  // Assert that blk_skip was initialised (234 is the uninitialised marker).
+  assert(x->blk_skip[plane][(blk_row << bwl) + blk_col] != 234);
+
+  if (x->blk_skip[plane][(blk_row << bwl) + blk_col] == 0) {
+#else
+  {
+#endif
+#if CONFIG_NEW_QUANT
+    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, ctx);
+#else
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+  }
+#if CONFIG_VAR_TX
+  else {
+    p->eobs[block] = 0;
+  }
+#endif
+
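+  // Trellis-optimize any non-zero coefficients; the entropy contexts are
+  // then set to whether non-zero coefficients remain afterwards.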
+  if (p->eobs[block]) {
+    *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+  } else {
+    *a = *l = p->eobs[block] > 0;
+  }
+
+#if CONFIG_VAR_TX
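+  // Propagate the context of this transform block to every 4x4 unit it
+  // covers so that neighboring blocks see a consistent context.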
+  for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i) {
+    a[i] = a[0];
+  }
+  for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i) {
+    l[i] = l[0];
+  }
+#endif
+
+  if (p->eobs[block]) *(args->skip) = 0;
+
+  if (p->eobs[block] == 0) return;
+
+  // inverse transform parameters
+  inv_txfm_param.tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
+  inv_txfm_param.tx_size = tx_size;
+  inv_txfm_param.eob = p->eobs[block];
+  inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    inv_txfm_param.bd = xd->bd;
+    highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
+}
+
+#if CONFIG_VAR_TX
+static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
+                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                               void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
+  const TX_SIZE plane_tx_size =
+      plane ? get_uv_tx_size_impl(mbmi->inter_tx_size[tx_row][tx_col], bsize, 0,
+                                  0)
+            : mbmi->inter_tx_size[tx_row][tx_col];
+
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  if (tx_size == plane_tx_size) {
+    encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
+  } else {
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+
+    assert(bsl > 0);
+    --bsl;
+
+#if CONFIG_EXT_TX
+    assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
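+    // Otherwise split into four half-size transform blocks and recurse;
+    // (i >> 1) and (i & 1) give each quadrant's row/column offset in
+    // units of 1 << bsl.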
+    for (i = 0; i < 4; ++i) {
+      const int offsetr = blk_row + ((i >> 1) << bsl);
+      const int offsetc = blk_col + ((i & 0x01) << bsl);
+      int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      encode_block_inter(plane, block + i * step, offsetr, offsetc, plane_bsize,
+                         tx_size - 1, arg);
+    }
+  }
+}
+#endif
+
+static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
+                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                               void *arg) {
+  MACROBLOCK *const x = (MACROBLOCK *)arg;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  uint8_t *dst;
+#if CONFIG_NEW_QUANT
+  int ctx;
+#endif  // CONFIG_NEW_QUANT
+  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
+
+#if CONFIG_NEW_QUANT
+  ctx = 0;
+  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                          tx_size, ctx);
+#else
+  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                   VP10_XFORM_QUANT_B);
+#endif  // CONFIG_NEW_QUANT
+
+  if (p->eobs[block] > 0) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
+      } else {
+        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+                                xd->bd);
+      }
+      return;
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+      vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+    } else {
+      vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+    }
+  }
+}
+
+void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  vp10_subtract_plane(x, bsize, 0);
+  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+                                          encode_block_pass1, x);
+}
+
+void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct optimize_ctx ctx;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+  int plane;
+
+  mbmi->skip = 1;
+
+  if (x->skip) return;
+
+  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+#if CONFIG_VAR_TX
+    // TODO(jingning): Clean this up.
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+    const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+    const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+    const int bh = num_4x4_blocks_wide_lookup[txb_size];
+    int idx, idy;
+    int block = 0;
+    int step = num_4x4_blocks_txsize_lookup[max_tx_size];
+    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
+#else
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+#endif
+    vp10_subtract_plane(x, bsize, plane);
+    arg.ta = ctx.ta[plane];
+    arg.tl = ctx.tl[plane];
+
+#if CONFIG_VAR_TX
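+    // Walk the plane in steps of the maximum transform block size;
+    // encode_block_inter recursively splits each block down to the
+    // transform sizes actually signalled.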
+    for (idy = 0; idy < mi_height; idy += bh) {
+      for (idx = 0; idx < mi_width; idx += bh) {
+        encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
+                           &arg);
+        block += step;
+      }
+    }
+#else
+    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                            &arg);
+#endif
+  }
+}
+
+#if CONFIG_SUPERTX
+void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct optimize_ctx ctx;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct encode_b_args arg = { x, &ctx, &mbmi->skip, NULL, NULL, 1 };
+  int plane;
+
+  mbmi->skip = 1;
+  if (x->skip) return;
+
+  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+#if CONFIG_VAR_TX
+    const TX_SIZE tx_size = TX_4X4;
+#else
+    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
+#endif
+    vp10_subtract_plane(x, bsize, plane);
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
+    arg.ta = ctx.ta[plane];
+    arg.tl = ctx.tl[plane];
+    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+                                            &arg);
+  }
+}
+#endif  // CONFIG_SUPERTX
+
+void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                             void *arg) {
+  struct encode_b_args *const args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  const TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  PREDICTION_MODE mode;
+  const int bwl = b_width_log2_lookup[plane_bsize];
+  const int bhl = b_height_log2_lookup[plane_bsize];
+  const int diff_stride = 4 * (1 << bwl);
+  uint8_t *src, *dst;
+  int16_t *src_diff;
+  uint16_t *eob = &p->eobs[block];
+  const int src_stride = p->src.stride;
+  const int dst_stride = pd->dst.stride;
+  const int tx1d_width = num_4x4_blocks_wide_txsize_lookup[tx_size] << 2;
+  const int tx1d_height = num_4x4_blocks_high_txsize_lookup[tx_size] << 2;
+  ENTROPY_CONTEXT *a = NULL, *l = NULL;
+  int ctx;
+
+  INV_TXFM_PARAM inv_txfm_param;
+
+  assert(tx1d_width == tx1d_height);
+
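+  // Intra pipeline for one transform block: predict from previously
+  // reconstructed pixels, subtract, transform and quantize, then add the
+  // inverse transform back into dst so later blocks predict from the
+  // reconstruction.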
+  dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
+  src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
+  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+  mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
+  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+                           dst_stride, blk_col, blk_row, plane);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vpx_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
+                              src, src_stride, dst, dst_stride, xd->bd);
+  } else {
+    vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+                       src_stride, dst, dst_stride);
+  }
+#else
+  vpx_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
+                     src_stride, dst, dst_stride);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  a = &args->ta[blk_col];
+  l = &args->tl[blk_row];
+  ctx = combine_entropy_contexts(*a, *l);
+
+  if (args->enable_optimize_b) {
+#if CONFIG_NEW_QUANT
+    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, ctx);
+#else   // CONFIG_NEW_QUANT
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+    if (p->eobs[block]) {
+      *a = *l = vp10_optimize_b(x, plane, block, tx_size, ctx) > 0;
+    } else {
+      *a = *l = 0;
+    }
+  } else {
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_B);
+    *a = *l = p->eobs[block] > 0;
+  }
+
+  if (*eob) {
+    // inverse transform
+    inv_txfm_param.tx_type = tx_type;
+    inv_txfm_param.tx_size = tx_size;
+    inv_txfm_param.eob = *eob;
+    inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
+#if CONFIG_VP9_HIGHBITDEPTH
+    inv_txfm_param.bd = xd->bd;
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+    } else {
+      inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+    }
+#else
+    inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    *(args->skip) = 0;
+  }
+}
+
+void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+                                   int enable_optimize_b) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
+  ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
+
+  struct encode_b_args arg = { x,  NULL, &xd->mi[0]->mbmi.skip,
+                               ta, tl,   enable_optimize_b };
+  if (enable_optimize_b) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const TX_SIZE tx_size =
+        plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
+    vp10_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
+  }
+  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
+                                          vp10_encode_block_intra, &arg);
+}
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
new file mode 100644
index 0000000..4b88831
--- /dev/null
+++ b/av1/encoder/encodemb.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_ENCODEMB_H_
+#define VP10_ENCODER_ENCODEMB_H_
+
+#include "./vpx_config.h"
+#include "av1/encoder/block.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct optimize_ctx {
+  ENTROPY_CONTEXT ta[MAX_MB_PLANE][2 * MAX_MIB_SIZE];
+  ENTROPY_CONTEXT tl[MAX_MB_PLANE][2 * MAX_MIB_SIZE];
+};
+
+struct encode_b_args {
+  MACROBLOCK *x;
+  struct optimize_ctx *ctx;
+  int8_t *skip;
+  ENTROPY_CONTEXT *ta;
+  ENTROPY_CONTEXT *tl;
+  int8_t enable_optimize_b;
+};
+
+typedef enum VP10_XFORM_QUANT {
+  VP10_XFORM_QUANT_FP = 0,
+  VP10_XFORM_QUANT_B = 1,
+  VP10_XFORM_QUANT_DC = 2,
+  VP10_XFORM_QUANT_SKIP_QUANT = 3,
+  VP10_XFORM_QUANT_LAST = 4
+} VP10_XFORM_QUANT;
+
+void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+#if CONFIG_SUPERTX
+void vp10_encode_sb_supertx(MACROBLOCK *x, BLOCK_SIZE bsize);
+#endif  // CONFIG_SUPERTX
+void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+                      int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                      VP10_XFORM_QUANT xform_quant_idx);
+#if CONFIG_NEW_QUANT
+void vp10_xform_quant_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                          int ctx);
+void vp10_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx);
+void vp10_xform_quant_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
+                             int blk_col, BLOCK_SIZE plane_bsize,
+                             TX_SIZE tx_size, int ctx);
+void vp10_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block,
+                                int blk_row, int blk_col,
+                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                                int ctx);
+#endif
+
+int vp10_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+                    int ctx);
+
+void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+
+void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+                             BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                             void *arg);
+
+void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane,
+                                   int enable_optimize_b);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_ENCODEMB_H_
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
new file mode 100644
index 0000000..78da2b7
--- /dev/null
+++ b/av1/encoder/encodemv.c
@@ -0,0 +1,454 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "av1/common/common.h"
+#include "av1/common/entropymode.h"
+
+#include "av1/encoder/cost.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/subexp.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+
+static struct vp10_token mv_joint_encodings[MV_JOINTS];
+static struct vp10_token mv_class_encodings[MV_CLASSES];
+static struct vp10_token mv_fp_encodings[MV_FP_SIZE];
+static struct vp10_token mv_class0_encodings[CLASS0_SIZE];
+
+void vp10_entropy_mv_init(void) {
+  vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree);
+  vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree);
+  vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree);
+  vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
+}
+
+static void encode_mv_component(vp10_writer *w, int comp,
+                                const nmv_component *mvcomp, int usehp) {
+  int offset;
+  const int sign = comp < 0;
+  const int mag = sign ? -comp : comp;
+  const int mv_class = vp10_get_mv_class(mag - 1, &offset);
+  const int d = offset >> 3;         // int mv data
+  const int fr = (offset >> 1) & 3;  // fractional mv data
+  const int hp = offset & 1;         // high precision mv data
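+  // Within a class, the offset decomposes as (d << 3) | (fr << 1) | hp;
+  // e.g. offset 11 (binary 1011) gives d = 1, fr = 1, hp = 1.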
+
+  assert(comp != 0);
+
+  // Sign
+  vp10_write(w, sign, mvcomp->sign);
+
+  // Class
+  vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
+                   &mv_class_encodings[mv_class]);
+
+  // Integer bits
+  if (mv_class == MV_CLASS_0) {
+    vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
+                     &mv_class0_encodings[d]);
+  } else {
+    int i;
+    const int n = mv_class + CLASS0_BITS - 1;  // number of bits
+    for (i = 0; i < n; ++i) vp10_write(w, (d >> i) & 1, mvcomp->bits[i]);
+  }
+
+  // Fractional bits
+  vp10_write_token(w, vp10_mv_fp_tree,
+                   mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+                   &mv_fp_encodings[fr]);
+
+  // High precision bit
+  if (usehp)
+    vp10_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+}
+
+static void build_nmv_component_cost_table(int *mvcost,
+                                           const nmv_component *const mvcomp,
+                                           int usehp) {
+  int i, v;
+  int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
+  int bits_cost[MV_OFFSET_BITS][2];
+  int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
+  int class0_hp_cost[2], hp_cost[2];
+
+  sign_cost[0] = vp10_cost_zero(mvcomp->sign);
+  sign_cost[1] = vp10_cost_one(mvcomp->sign);
+  vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree);
+  vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree);
+  for (i = 0; i < MV_OFFSET_BITS; ++i) {
+    bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]);
+    bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]);
+  }
+
+  for (i = 0; i < CLASS0_SIZE; ++i)
+    vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree);
+  vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree);
+
+  if (usehp) {
+    class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp);
+    class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp);
+    hp_cost[0] = vp10_cost_zero(mvcomp->hp);
+    hp_cost[1] = vp10_cost_one(mvcomp->hp);
+  }
+  mvcost[0] = 0;
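+  // For each magnitude, sum the class, integer-bit, fractional and
+  // (optionally) high-precision bit costs, then fold in the sign cost.
+  // Callers point mvcost at the centre of a 2 * MV_MAX + 1 entry table,
+  // so the negative index below is valid.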
+  for (v = 1; v <= MV_MAX; ++v) {
+    int z, c, o, d, e, f, cost = 0;
+    z = v - 1;
+    c = vp10_get_mv_class(z, &o);
+    cost += class_cost[c];
+    d = (o >> 3);     /* int mv data */
+    f = (o >> 1) & 3; /* fractional pel mv data */
+    e = (o & 1);      /* high precision mv data */
+    if (c == MV_CLASS_0) {
+      cost += class0_cost[d];
+    } else {
+      int i, b;
+      b = c + CLASS0_BITS - 1; /* number of bits */
+      for (i = 0; i < b; ++i) cost += bits_cost[i][((d >> i) & 1)];
+    }
+    if (c == MV_CLASS_0) {
+      cost += class0_fp_cost[d][f];
+    } else {
+      cost += fp_cost[f];
+    }
+    if (usehp) {
+      if (c == MV_CLASS_0) {
+        cost += class0_hp_cost[e];
+      } else {
+        cost += hp_cost[e];
+      }
+    }
+    mvcost[v] = cost + sign_cost[0];
+    mvcost[-v] = cost + sign_cost[1];
+  }
+}
+
+static void update_mv(vp10_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
+                      vpx_prob upd_p) {
+  (void)upd_p;
+  vp10_cond_prob_diff_update(w, cur_p, ct);
+}
+
+static void write_mv_update(const vpx_tree_index *tree,
+                            vpx_prob probs[/*n - 1*/],
+                            const unsigned int counts[/*n - 1*/], int n,
+                            vp10_writer *w) {
+  int i;
+  unsigned int branch_ct[32][2];
+
+  // Assuming max number of probabilities <= 32
+  assert(n <= 32);
+
+  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  for (i = 0; i < n - 1; ++i)
+    update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
+}
+
+void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
+                          nmv_context_counts *const nmv_counts) {
+  int i, j;
+#if CONFIG_REF_MV
+  int nmv_ctx = 0;
+  for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
+    nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
+    nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
+    write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+                    w);
+
+    vp10_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
+
+    for (i = 0; i < 2; ++i) {
+      nmv_component *comp = &mvc->comps[i];
+      nmv_component_counts *comp_counts = &counts->comps[i];
+
+      update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
+      write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+                      MV_CLASSES, w);
+      write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+                      CLASS0_SIZE, w);
+      for (j = 0; j < MV_OFFSET_BITS; ++j)
+        update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
+    }
+
+    for (i = 0; i < 2; ++i) {
+      for (j = 0; j < CLASS0_SIZE; ++j)
+        write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+                        counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
+
+      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+                      MV_FP_SIZE, w);
+    }
+
+    if (usehp) {
+      for (i = 0; i < 2; ++i) {
+        update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp,
+                  MV_UPDATE_PROB);
+        update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
+      }
+    }
+  }
+#else
+  nmv_context *const mvc = &cm->fc->nmvc;
+  nmv_context_counts *const counts = nmv_counts;
+
+  write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+                  w);
+
+  for (i = 0; i < 2; ++i) {
+    nmv_component *comp = &mvc->comps[i];
+    nmv_component_counts *comp_counts = &counts->comps[i];
+
+    update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
+    write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+                    MV_CLASSES, w);
+    write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+                    CLASS0_SIZE, w);
+    for (j = 0; j < MV_OFFSET_BITS; ++j)
+      update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
+  }
+
+  for (i = 0; i < 2; ++i) {
+    for (j = 0; j < CLASS0_SIZE; ++j)
+      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+                      counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
+
+    write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+                    MV_FP_SIZE, w);
+  }
+
+  if (usehp) {
+    for (i = 0; i < 2; ++i) {
+      update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp,
+                MV_UPDATE_PROB);
+      update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
+    }
+  }
+#endif
+}
+
+void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+#if CONFIG_REF_MV
+                    int is_compound,
+#endif
+                    const nmv_context *mvctx, int usehp) {
+  const MV diff = { mv->row - ref->row, mv->col - ref->col };
+  const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
+  usehp = usehp && vp10_use_mv_hp(ref);
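+  // High-precision mv bits are only coded when the reference mv is small
+  // enough for them to be useful (vp10_use_mv_hp).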
+
+#if CONFIG_REF_MV && !CONFIG_EXT_INTER
+  if (is_compound) {
+    vp10_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
+    if (j == MV_JOINT_ZERO) return;
+  } else {
+    if (j == MV_JOINT_ZERO) assert(0);
+  }
+#endif
+
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+  (void)is_compound;
+#endif
+
+  vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
+                   &mv_joint_encodings[j]);
+  if (mv_joint_vertical(j))
+    encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
+
+  if (mv_joint_horizontal(j))
+    encode_mv_component(w, diff.col, &mvctx->comps[1], usehp);
+
+  // If auto_mv_step_size is enabled then keep track of the largest
+  // motion vector component used.
+  if (cpi->sf.mv.auto_mv_step_size) {
+    unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3;
+    cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude);
+  }
+}
+
+void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+                               const nmv_context *ctx, int usehp) {
+  vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
+  build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
+  build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
+}
+
+#if CONFIG_EXT_INTER
+static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
+                    const int_mv mvs[2],
+#if CONFIG_REF_MV
+                    const int_mv pred_mvs[2],
+#endif
+                    nmv_context_counts *nmv_counts) {
+  int i;
+  PREDICTION_MODE mode = mbmi->mode;
+  int mv_idx = (mode == NEWFROMNEARMV);
+#if !CONFIG_REF_MV
+  nmv_context_counts *counts = nmv_counts;
+#endif
+
+  if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+      const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][mv_idx].as_mv;
+      const MV diff = { mvs[i].as_mv.row - ref->row,
+                        mvs[i].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                                 mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+      nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+      (void)pred_mvs;
+#endif
+      vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    }
+  } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
+    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
+    const MV diff = { mvs[1].as_mv.row - ref->row,
+                      mvs[1].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
+    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+  } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
+    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
+    const MV diff = { mvs[0].as_mv.row - ref->row,
+                      mvs[0].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
+    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+  }
+}
+
+static void inc_mvs_sub8x8(const MODE_INFO *mi, int block, const int_mv mvs[2],
+#if CONFIG_REF_MV
+                           const MB_MODE_INFO_EXT *mbmi_ext,
+#endif
+                           nmv_context_counts *nmv_counts) {
+  int i;
+  PREDICTION_MODE mode = mi->bmi[block].as_mode;
+#if CONFIG_REF_MV
+  const MB_MODE_INFO *mbmi = &mi->mbmi;
+#else
+  nmv_context_counts *counts = nmv_counts;
+#endif
+
+  if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(&mi->mbmi); ++i) {
+      const MV *ref = &mi->bmi[block].ref_mv[i].as_mv;
+      const MV diff = { mvs[i].as_mv.row - ref->row,
+                        mvs[i].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                                 mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+      nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
+      vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    }
+  } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
+    const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
+    const MV diff = { mvs[1].as_mv.row - ref->row,
+                      mvs[1].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
+    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+  } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
+    const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
+    const MV diff = { mvs[0].as_mv.row - ref->row,
+                      mvs[0].as_mv.col - ref->col };
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
+    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+  }
+}
+#else
+static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
+                    const int_mv mvs[2],
+#if CONFIG_REF_MV
+                    const int_mv pred_mvs[2],
+#endif
+                    nmv_context_counts *nmv_counts) {
+  int i;
+#if !CONFIG_REF_MV
+  nmv_context_counts *counts = nmv_counts;
+#endif
+
+  for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+    const MV *ref = &pred_mvs[i].as_mv;
+#else
+    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
+#endif
+    const MV diff = { mvs[i].as_mv.row - ref->row,
+                      mvs[i].as_mv.col - ref->col };
+    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+  }
+}
+#endif  // CONFIG_EXT_INTER
+
+void vp10_update_mv_count(ThreadData *td) {
+  const MACROBLOCKD *xd = &td->mb.e_mbd;
+  const MODE_INFO *mi = xd->mi[0];
+  const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *mbmi_ext = td->mb.mbmi_ext;
+
+  if (mbmi->sb_type < BLOCK_8X8) {
+    const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+    const int num_4x4_h = num_4x4_blocks_high_lookup[mbmi->sb_type];
+    int idx, idy;
+
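+    // Sub-8x8 partition: visit each prediction block (up to four) and
+    // accumulate its mv statistics.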
+    for (idy = 0; idy < 2; idy += num_4x4_h) {
+      for (idx = 0; idx < 2; idx += num_4x4_w) {
+        const int i = idy * 2 + idx;
+
+#if CONFIG_EXT_INTER
+        if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
+          inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv,
+#if CONFIG_REF_MV
+                         mbmi_ext, td->counts->mv);
+#else
+                         &td->counts->mv);
+#endif
+#else
+        if (mi->bmi[i].as_mode == NEWMV)
+          inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv,
+#if CONFIG_REF_MV
+                  mi->bmi[i].pred_mv_s8, td->counts->mv);
+#else
+                  &td->counts->mv);
+#endif
+#endif  // CONFIG_EXT_INTER
+      }
+    }
+  } else {
+#if CONFIG_EXT_INTER
+    if (have_newmv_in_inter_mode(mbmi->mode))
+#else
+    if (mbmi->mode == NEWMV)
+#endif  // CONFIG_EXT_INTER
+      inc_mvs(mbmi, mbmi_ext, mbmi->mv,
+#if CONFIG_REF_MV
+              mbmi->pred_mv, td->counts->mv);
+#else
+              &td->counts->mv);
+#endif
+  }
+}
diff --git a/av1/encoder/encodemv.h b/av1/encoder/encodemv.h
new file mode 100644
index 0000000..6cb57c2
--- /dev/null
+++ b/av1/encoder/encodemv.h
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_ENCODEMV_H_
+#define VP10_ENCODER_ENCODEMV_H_
+
+#include "av1/encoder/encoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_entropy_mv_init(void);
+
+void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
+                          nmv_context_counts *const counts);
+
+void vp10_encode_mv(VP10_COMP *cpi, vp10_writer *w, const MV *mv, const MV *ref,
+#if CONFIG_REF_MV
+                    int is_compound,
+#endif
+                    const nmv_context *mvctx, int usehp);
+
+void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+                               const nmv_context *mvctx, int usehp);
+
+void vp10_update_mv_count(ThreadData *td);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_ENCODEMV_H_
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
new file mode 100644
index 0000000..5196d9c
--- /dev/null
+++ b/av1/encoder/encoder.c
@@ -0,0 +1,5607 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+
+#include "av1/common/alloccommon.h"
+#if CONFIG_CLPF
+#include "av1/common/clpf.h"
+#endif
+#if CONFIG_DERING
+#include "av1/common/dering.h"
+#endif  // CONFIG_DERING
+#include "av1/common/filter.h"
+#include "av1/common/idct.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/tile_common.h"
+
+#include "av1/encoder/aq_complexity.h"
+#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/encoder/aq_variance.h"
+#include "av1/encoder/bitstream.h"
+#if CONFIG_ANS
+#include "av1/encoder/buf_ans.h"
+#endif
+#include "av1/encoder/context_tree.h"
+#include "av1/encoder/encodeframe.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/ethread.h"
+#include "av1/encoder/firstpass.h"
+#include "av1/encoder/mbgraph.h"
+#include "av1/encoder/picklpf.h"
+#if CONFIG_LOOP_RESTORATION
+#include "av1/encoder/pickrst.h"
+#endif  // CONFIG_LOOP_RESTORATION
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/resize.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/speed_features.h"
+#include "av1/encoder/temporal_filter.h"
+
+#include "./vp10_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+#include "aom_dsp/psnr.h"
+#if CONFIG_INTERNAL_STATS
+#include "aom_dsp/ssim.h"
+#endif
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/vpx_filter.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+#include "aom_ports/vpx_timer.h"
+#include "aom_scale/vpx_scale.h"
+#include "aom_util/debug_util.h"
+
+#define AM_SEGMENT_ID_INACTIVE 7
+#define AM_SEGMENT_ID_ACTIVE 0
+
+#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
+
+#define ALTREF_HIGH_PRECISION_MV 1     // Whether to use high precision mv
+                                       //  for altref computation.
+#define HIGH_PRECISION_MV_QTHRESH 200  // Q threshold for high precision
+                                       // mv. Choose a very high value for
+                                       // now so that HIGH_PRECISION is always
+                                       // chosen.
+// #define OUTPUT_YUV_REC
+#ifdef OUTPUT_YUV_DENOISED
+FILE *yuv_denoised_file = NULL;
+#endif
+#ifdef OUTPUT_YUV_SKINMAP
+FILE *yuv_skinmap_file = NULL;
+#endif
+#ifdef OUTPUT_YUV_REC
+FILE *yuv_rec_file;
+#define FILE_NAME_LEN 100
+#endif
+
+#if 0
+FILE *framepsnr;
+FILE *kf_list;
+FILE *keyfile;
+#endif
+
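+// Map a VPX_SCALING mode onto the fraction (*hr / *hs) applied to the
+// frame dimensions.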
+static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
+  switch (mode) {
+    case NORMAL:
+      *hr = 1;
+      *hs = 1;
+      break;
+    case FOURFIVE:
+      *hr = 4;
+      *hs = 5;
+      break;
+    case THREEFIVE:
+      *hr = 3;
+      *hs = 5;
+      break;
+    case ONETWO:
+      *hr = 1;
+      *hs = 2;
+      break;
+    default:
+      *hr = 1;
+      *hs = 1;
+      assert(0);
+      break;
+  }
+}
+
+// Mark all inactive blocks as active. Other segmentation features may be
+// set, so memset cannot be used; instead, only inactive blocks are reset.
+static void suppress_active_map(VP10_COMP *cpi) {
+  unsigned char *const seg_map = cpi->segmentation_map;
+  int i;
+  if (cpi->active_map.enabled || cpi->active_map.update)
+    for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
+      if (seg_map[i] == AM_SEGMENT_ID_INACTIVE)
+        seg_map[i] = AM_SEGMENT_ID_ACTIVE;
+}
+
+static void apply_active_map(VP10_COMP *cpi) {
+  struct segmentation *const seg = &cpi->common.seg;
+  unsigned char *const seg_map = cpi->segmentation_map;
+  const unsigned char *const active_map = cpi->active_map.map;
+  int i;
+
+  assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE);
+
+  if (frame_is_intra_only(&cpi->common)) {
+    cpi->active_map.enabled = 0;
+    cpi->active_map.update = 1;
+  }
+
+  if (cpi->active_map.update) {
+    if (cpi->active_map.enabled) {
+      for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
+        if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
+      vp10_enable_segmentation(seg);
+      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
+      // filter level being zero regardless of the value of seg->abs_delta.
+      vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+                       -MAX_LOOP_FILTER);
+    } else {
+      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      if (seg->enabled) {
+        seg->update_data = 1;
+        seg->update_map = 1;
+      }
+    }
+    cpi->active_map.update = 0;
+  }
+}
+
+int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                        int cols) {
+  if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
+    unsigned char *const active_map_8x8 = cpi->active_map.map;
+    const int mi_rows = cpi->common.mi_rows;
+    const int mi_cols = cpi->common.mi_cols;
+    cpi->active_map.update = 1;
+    if (new_map_16x16) {
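+      // The caller's map has one entry per 16x16 block; replicate each
+      // entry across the co-located 8x8 mi units.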
+      int r, c;
+      for (r = 0; r < mi_rows; ++r) {
+        for (c = 0; c < mi_cols; ++c) {
+          active_map_8x8[r * mi_cols + c] =
+              new_map_16x16[(r >> 1) * cols + (c >> 1)]
+                  ? AM_SEGMENT_ID_ACTIVE
+                  : AM_SEGMENT_ID_INACTIVE;
+        }
+      }
+      cpi->active_map.enabled = 1;
+    } else {
+      cpi->active_map.enabled = 0;
+    }
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+                        int cols) {
+  if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
+      new_map_16x16) {
+    unsigned char *const seg_map_8x8 = cpi->segmentation_map;
+    const int mi_rows = cpi->common.mi_rows;
+    const int mi_cols = cpi->common.mi_cols;
+    memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
+    if (cpi->active_map.enabled) {
+      int r, c;
+      for (r = 0; r < mi_rows; ++r) {
+        for (c = 0; c < mi_cols; ++c) {
+          // Cyclic refresh segments are considered active despite not having
+          // AM_SEGMENT_ID_ACTIVE
+          new_map_16x16[(r >> 1) * cols + (c >> 1)] |=
+              seg_map_8x8[r * mi_cols + c] != AM_SEGMENT_ID_INACTIVE;
+        }
+      }
+    }
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
+  MACROBLOCK *const mb = &cpi->td.mb;
+  cpi->common.allow_high_precision_mv = allow_high_precision_mv;
+
+#if CONFIG_REF_MV
+  if (cpi->common.allow_high_precision_mv) {
+    int i;
+    for (i = 0; i < NMV_CONTEXTS; ++i) {
+      mb->mv_cost_stack[i] = mb->nmvcost_hp[i];
+      mb->mvsadcost = mb->nmvsadcost_hp;
+    }
+  } else {
+    int i;
+    for (i = 0; i < NMV_CONTEXTS; ++i) {
+      mb->mv_cost_stack[i] = mb->nmvcost[i];
+      mb->mvsadcost = mb->nmvsadcost;
+    }
+  }
+#else
+  if (cpi->common.allow_high_precision_mv) {
+    mb->mvcost = mb->nmvcost_hp;
+    mb->mvsadcost = mb->nmvcost_hp;
+  } else {
+    mb->mvcost = mb->nmvcost;
+    mb->mvsadcost = mb->nmvcost;
+  }
+#endif
+}
+
+static BLOCK_SIZE select_sb_size(const VP10_COMP *const cpi) {
+#if CONFIG_EXT_PARTITION
+  if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_64X64)
+    return BLOCK_64X64;
+
+  if (cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_128X128)
+    return BLOCK_128X128;
+
+  assert(cpi->oxcf.superblock_size == VPX_SUPERBLOCK_SIZE_DYNAMIC);
+
+  assert(IMPLIES(cpi->common.tile_cols > 1,
+                 cpi->common.tile_width % MAX_MIB_SIZE == 0));
+  assert(IMPLIES(cpi->common.tile_rows > 1,
+                 cpi->common.tile_height % MAX_MIB_SIZE == 0));
+
+  // TODO(any): Possibly could improve this with a heuristic.
+  return BLOCK_128X128;
+#else
+  (void)cpi;
+  return BLOCK_64X64;
+#endif  //  CONFIG_EXT_PARTITION
+}
+
+static void setup_frame(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  // Set up entropy context depending on frame type. The decoder mandates
+  // the use of the default context, index 0, for keyframes and inter
+  // frames where the error_resilient_mode or intra_only flag is set. For
+  // other inter-frames the encoder currently uses only two contexts;
+  // context 1 for ALTREF frames and context 0 for the others.
+  if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
+    vp10_setup_past_independence(cm);
+  } else {
+#if CONFIG_EXT_REFS
+    const GF_GROUP *gf_group = &cpi->twopass.gf_group;
+    if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)
+      cm->frame_context_idx = EXT_ARF_FRAME;
+    else if (cpi->refresh_alt_ref_frame)
+      cm->frame_context_idx = ARF_FRAME;
+#else
+    if (cpi->refresh_alt_ref_frame) cm->frame_context_idx = ARF_FRAME;
+#endif
+    else if (cpi->rc.is_src_frame_alt_ref)
+      cm->frame_context_idx = OVERLAY_FRAME;
+    else if (cpi->refresh_golden_frame)
+      cm->frame_context_idx = GLD_FRAME;
+#if CONFIG_EXT_REFS
+    else if (cpi->refresh_bwd_ref_frame)
+      cm->frame_context_idx = BRF_FRAME;
+#endif
+    else
+      cm->frame_context_idx = REGULAR_FRAME;
+  }
+
+  if (cm->frame_type == KEY_FRAME) {
+    cpi->refresh_golden_frame = 1;
+    cpi->refresh_alt_ref_frame = 1;
+    vp10_zero(cpi->interp_filter_selected);
+  } else {
+    *cm->fc = cm->frame_contexts[cm->frame_context_idx];
+    vp10_zero(cpi->interp_filter_selected[0]);
+  }
+
+  cpi->vaq_refresh = 0;
+
+  set_sb_size(cm, select_sb_size(cpi));
+}
+
+static void vp10_enc_setup_mi(VP10_COMMON *cm) {
+  int i;
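+  // mip is allocated with a one-unit border above and to the left of the
+  // visible area; mi points at the first visible mi unit.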
+  cm->mi = cm->mip + cm->mi_stride + 1;
+  memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+  cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
+  // Clear top border row
+  memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride);
+  // Clear left border column
+  for (i = 1; i < cm->mi_rows + 1; ++i)
+    memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip));
+
+  cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+  cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
+
+  memset(cm->mi_grid_base, 0,
+         cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
+}
+
+static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
+  cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+  if (!cm->mip) return 1;
+  cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
+  if (!cm->prev_mip) return 1;
+  cm->mi_alloc_size = mi_size;
+
+  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  if (!cm->mi_grid_base) return 1;
+  cm->prev_mi_grid_base =
+      (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+  if (!cm->prev_mi_grid_base) return 1;
+
+  return 0;
+}
+
+static void vp10_enc_free_mi(VP10_COMMON *cm) {
+  vpx_free(cm->mip);
+  cm->mip = NULL;
+  vpx_free(cm->prev_mip);
+  cm->prev_mip = NULL;
+  vpx_free(cm->mi_grid_base);
+  cm->mi_grid_base = NULL;
+  vpx_free(cm->prev_mi_grid_base);
+  cm->prev_mi_grid_base = NULL;
+}
+
+static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
+  // Current mip will be the prev_mip for the next frame.
+  MODE_INFO **temp_base = cm->prev_mi_grid_base;
+  MODE_INFO *temp = cm->prev_mip;
+  cm->prev_mip = cm->mip;
+  cm->mip = temp;
+
+  // Update the upper left visible macroblock ptrs.
+  cm->mi = cm->mip + cm->mi_stride + 1;
+  cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
+
+  cm->prev_mi_grid_base = cm->mi_grid_base;
+  cm->mi_grid_base = temp_base;
+  cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
+  cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
+}
+
+void vp10_initialize_enc(void) {
+  static volatile int init_done = 0;
+
+  if (!init_done) {
+    vp10_rtcd();
+    vpx_dsp_rtcd();
+    vpx_scale_rtcd();
+    vp10_init_intra_predictors();
+    vp10_init_me_luts();
+    vp10_rc_init_minq_luts();
+    vp10_entropy_mv_init();
+    vp10_encode_token_init();
+#if CONFIG_EXT_INTER
+    vp10_init_wedge_masks();
+#endif
+    init_done = 1;
+  }
+}
+
+static void dealloc_compressor_data(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int i;
+
+  vpx_free(cpi->mbmi_ext_base);
+  cpi->mbmi_ext_base = NULL;
+
+  vpx_free(cpi->tile_data);
+  cpi->tile_data = NULL;
+
+  // Delete segmentation map
+  vpx_free(cpi->segmentation_map);
+  cpi->segmentation_map = NULL;
+  vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+  cpi->coding_context.last_frame_seg_map_copy = NULL;
+
+#if CONFIG_REF_MV
+  for (i = 0; i < NMV_CONTEXTS; ++i) {
+    vpx_free(cpi->nmv_costs[i][0]);
+    vpx_free(cpi->nmv_costs[i][1]);
+    vpx_free(cpi->nmv_costs_hp[i][0]);
+    vpx_free(cpi->nmv_costs_hp[i][1]);
+    cpi->nmv_costs[i][0] = NULL;
+    cpi->nmv_costs[i][1] = NULL;
+    cpi->nmv_costs_hp[i][0] = NULL;
+    cpi->nmv_costs_hp[i][1] = NULL;
+  }
+#endif
+
+  vpx_free(cpi->nmvcosts[0]);
+  vpx_free(cpi->nmvcosts[1]);
+  cpi->nmvcosts[0] = NULL;
+  cpi->nmvcosts[1] = NULL;
+
+  vpx_free(cpi->nmvcosts_hp[0]);
+  vpx_free(cpi->nmvcosts_hp[1]);
+  cpi->nmvcosts_hp[0] = NULL;
+  cpi->nmvcosts_hp[1] = NULL;
+
+  vpx_free(cpi->nmvsadcosts[0]);
+  vpx_free(cpi->nmvsadcosts[1]);
+  cpi->nmvsadcosts[0] = NULL;
+  cpi->nmvsadcosts[1] = NULL;
+
+  vpx_free(cpi->nmvsadcosts_hp[0]);
+  vpx_free(cpi->nmvsadcosts_hp[1]);
+  cpi->nmvsadcosts_hp[0] = NULL;
+  cpi->nmvsadcosts_hp[1] = NULL;
+
+  vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  cpi->cyclic_refresh = NULL;
+
+  vpx_free(cpi->active_map.map);
+  cpi->active_map.map = NULL;
+
+  // Free up-sampled reference buffers.
+  for (i = 0; i < (REF_FRAMES + 1); i++)
+    vpx_free_frame_buffer(&cpi->upsampled_ref_bufs[i].buf);
+
+  vp10_free_ref_frame_buffers(cm->buffer_pool);
+  vp10_free_context_buffers(cm);
+
+  vpx_free_frame_buffer(&cpi->last_frame_uf);
+#if CONFIG_LOOP_RESTORATION
+  vpx_free_frame_buffer(&cpi->last_frame_db);
+  vp10_free_restoration_buffers(cm);
+#endif  // CONFIG_LOOP_RESTORATION
+  vpx_free_frame_buffer(&cpi->scaled_source);
+  vpx_free_frame_buffer(&cpi->scaled_last_source);
+  vpx_free_frame_buffer(&cpi->alt_ref_buffer);
+  vp10_lookahead_destroy(cpi->lookahead);
+
+  vpx_free(cpi->tile_tok[0][0]);
+  cpi->tile_tok[0][0] = 0;
+
+  vp10_free_pc_tree(&cpi->td);
+  vp10_free_var_tree(&cpi->td);
+
+  if (cpi->common.allow_screen_content_tools)
+    vpx_free(cpi->td.mb.palette_buffer);
+
+  if (cpi->source_diff_var != NULL) {
+    vpx_free(cpi->source_diff_var);
+    cpi->source_diff_var = NULL;
+  }
+#if CONFIG_ANS
+  vp10_buf_ans_free(&cpi->buf_ans);
+#endif  // CONFIG_ANS
+}
+
+static void save_coding_context(VP10_COMP *cpi) {
+  CODING_CONTEXT *const cc = &cpi->coding_context;
+  VP10_COMMON *cm = &cpi->common;
+#if CONFIG_REF_MV
+  int i;
+#endif
+
+// Stores a snapshot of key state variables which can subsequently be
+// restored with a call to vp10_restore_coding_context. These functions are
+// intended for use in a re-code loop in vp10_compress_frame where the
+// quantizer value is adjusted between loop iterations.
+#if CONFIG_REF_MV
+  for (i = 0; i < NMV_CONTEXTS; ++i) {
+    vp10_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
+    memcpy(cc->nmv_costs[i][0], cpi->nmv_costs[i][0],
+           MV_VALS * sizeof(*cpi->nmv_costs[i][0]));
+    memcpy(cc->nmv_costs[i][1], cpi->nmv_costs[i][1],
+           MV_VALS * sizeof(*cpi->nmv_costs[i][1]));
+    memcpy(cc->nmv_costs_hp[i][0], cpi->nmv_costs_hp[i][0],
+           MV_VALS * sizeof(*cpi->nmv_costs_hp[i][0]));
+    memcpy(cc->nmv_costs_hp[i][1], cpi->nmv_costs_hp[i][1],
+           MV_VALS * sizeof(*cpi->nmv_costs_hp[i][1]));
+  }
+#else
+  vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+#endif
+
+  memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
+         MV_VALS * sizeof(*cpi->nmvcosts[0]));
+  memcpy(cc->nmvcosts[1], cpi->nmvcosts[1],
+         MV_VALS * sizeof(*cpi->nmvcosts[1]));
+  memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0],
+         MV_VALS * sizeof(*cpi->nmvcosts_hp[0]));
+  memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
+         MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
+
+  memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
+         (cm->mi_rows * cm->mi_cols));
+
+  vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+  vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+
+  cc->fc = *cm->fc;
+}
+
+static void restore_coding_context(VP10_COMP *cpi) {
+  CODING_CONTEXT *const cc = &cpi->coding_context;
+  VP10_COMMON *cm = &cpi->common;
+#if CONFIG_REF_MV
+  int i;
+#endif
+
+// Restore key state variables to the snapshot state stored in the
+// previous call to vp10_save_coding_context.
+#if CONFIG_REF_MV
+  for (i = 0; i < NMV_CONTEXTS; ++i) {
+    vp10_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
+    memcpy(cpi->nmv_costs[i][0], cc->nmv_costs[i][0],
+           MV_VALS * sizeof(*cc->nmv_costs[i][0]));
+    memcpy(cpi->nmv_costs[i][1], cc->nmv_costs[i][1],
+           MV_VALS * sizeof(*cc->nmv_costs[i][1]));
+    memcpy(cpi->nmv_costs_hp[i][0], cc->nmv_costs_hp[i][0],
+           MV_VALS * sizeof(*cc->nmv_costs_hp[i][0]));
+    memcpy(cpi->nmv_costs_hp[i][1], cc->nmv_costs_hp[i][1],
+           MV_VALS * sizeof(*cc->nmv_costs_hp[i][1]));
+  }
+#else
+  vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+#endif
+
+  memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
+  memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
+  memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0],
+         MV_VALS * sizeof(*cc->nmvcosts_hp[0]));
+  memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
+         MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
+
+  memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
+         (cm->mi_rows * cm->mi_cols));
+
+  vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+  vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+
+  *cm->fc = cc->fc;
+}
+
+static void configure_static_seg_features(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  struct segmentation *const seg = &cm->seg;
+
+  int high_q = (int)(rc->avg_q > 48.0);
+  int qi_delta;
+
+  // Disable and clear down for KF
+  if (cm->frame_type == KEY_FRAME) {
+    // Clear down the global segmentation map
+    memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+    seg->update_map = 0;
+    seg->update_data = 0;
+    cpi->static_mb_pct = 0;
+
+    // Disable segmentation
+    vp10_disable_segmentation(seg);
+
+    // Clear down the segment features.
+    vp10_clearall_segfeatures(seg);
+  } else if (cpi->refresh_alt_ref_frame) {
+    // If this is an alt ref frame
+    // Clear down the global segmentation map
+    memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+    seg->update_map = 0;
+    seg->update_data = 0;
+    cpi->static_mb_pct = 0;
+
+    // Disable segmentation and individual segment features by default
+    vp10_disable_segmentation(seg);
+    vp10_clearall_segfeatures(seg);
+
+    // Scan frames from current to arf frame.
+    // This function re-enables segmentation if appropriate.
+    vp10_update_mbgraph_stats(cpi);
+
+    // If segmentation was enabled set those features needed for the
+    // arf itself.
+    if (seg->enabled) {
+      seg->update_map = 1;
+      seg->update_data = 1;
+
+      qi_delta =
+          vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
+      vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
+      vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+
+      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+
+      // Where relevant assume segment data is delta data
+      seg->abs_delta = SEGMENT_DELTADATA;
+    }
+  } else if (seg->enabled) {
+    // All other frames if segmentation has been enabled
+
+    // First normal frame in a valid gf or alt ref group
+    if (rc->frames_since_golden == 0) {
+      // Set up segment features for normal frames in an arf group
+      if (rc->source_alt_ref_active) {
+        seg->update_map = 0;
+        seg->update_data = 1;
+        seg->abs_delta = SEGMENT_DELTADATA;
+
+        qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
+                                       cm->bit_depth);
+        vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
+        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+
+        vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+
+        // Segment coding disabled for compound prediction testing
+        if (high_q || (cpi->static_mb_pct == 100)) {
+          vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+          vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+          vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+        }
+      } else {
+        // Disable segmentation and clear down features if alt ref
+        // is not active for this group
+
+        vp10_disable_segmentation(seg);
+
+        memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
+
+        seg->update_map = 0;
+        seg->update_data = 0;
+
+        vp10_clearall_segfeatures(seg);
+      }
+    } else if (rc->is_src_frame_alt_ref) {
+      // Special case where we are coding over the top of a previous
+      // alt ref frame.
+      // Segment coding disabled for compound prediction testing
+
+      // Enable ref frame features for segment 0 as well
+      vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+      vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+
+      // All mbs should use ALTREF_FRAME
+      vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+      vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+      vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+      vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+
+      // Skip all MBs if high Q (0,0 mv and skip coeffs)
+      if (high_q) {
+        vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+        vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+      }
+      // Enable data update
+      seg->update_data = 1;
+    } else {
+      // All other frames.
+
+      // No updates; leave things as they are.
+      seg->update_map = 0;
+      seg->update_data = 0;
+    }
+  }
+}
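+
+// vp10_compute_qdelta translates a real-valued Q target into a qindex
+// delta by searching the quantizer table. A simplified sketch of that
+// search, assuming a monotone q_of_index[] table (hypothetical helper,
+// not the actual implementation):
+static int compute_qdelta_sketch(const double *q_of_index, int n,
+                                 double qstart, double qtarget) {
+  int start = n - 1, target = n - 1, i;
+  for (i = 0; i < n; ++i)
+    if (q_of_index[i] >= qstart) {
+      start = i;  // first index reaching the starting Q
+      break;
+    }
+  for (i = 0; i < n; ++i)
+    if (q_of_index[i] >= qtarget) {
+      target = i;  // first index reaching the target Q
+      break;
+    }
+  return target - start;  // negative: finer quantizer than baseline
+}
+// With qtarget = avg_q * 0.875 (the arf case above), the returned delta
+// is negative, so segment 1 is coded at a finer quantizer.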
+
+static void update_reference_segmentation_map(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
+  uint8_t *cache_ptr = cm->last_frame_seg_map;
+  int row, col;
+
+  for (row = 0; row < cm->mi_rows; row++) {
+    MODE_INFO **mi_8x8 = mi_8x8_ptr;
+    uint8_t *cache = cache_ptr;
+    for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
+      cache[0] = mi_8x8[0]->mbmi.segment_id;
+    mi_8x8_ptr += cm->mi_stride;
+    cache_ptr += cm->mi_cols;
+  }
+}
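+
+// The two row strides differ by design: mi_grid_visible rows are
+// mi_stride pointers apart (the mode-info grid carries border padding),
+// while last_frame_seg_map is packed at exactly mi_cols bytes per row.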
+
+static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+
+  if (!cpi->lookahead)
+    cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
+                                         cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                         cm->use_highbitdepth,
+#endif
+                                         oxcf->lag_in_frames);
+  if (!cpi->lookahead)
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate lag buffers");
+
+  // TODO(agrange) Check if ARF is enabled and skip allocation if not.
+  if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate altref buffer");
+}
+
+static void alloc_util_frame_buffers(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate last frame buffer");
+
+#if CONFIG_LOOP_RESTORATION
+  if (vpx_realloc_frame_buffer(&cpi->last_frame_db, cm->width, cm->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate last frame deblocked buffer");
+#endif  // CONFIG_LOOP_RESTORATION
+
+  if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate scaled source buffer");
+
+  if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate scaled last source buffer");
+}
+
+static int alloc_context_buffers_ext(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+  int mi_size = cm->mi_cols * cm->mi_rows;
+
+  cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
+  if (!cpi->mbmi_ext_base) return 1;
+
+  return 0;
+}
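+
+// Usage sketch: failure is signalled by a nonzero return, so a caller
+// can route it into the codec error handler, e.g. (illustrative,
+// mirroring the vpx_internal_error pattern used elsewhere in this file):
+//   if (alloc_context_buffers_ext(cpi))
+//     vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+//                        "Failed to allocate mbmi_ext_base");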
+
+void vp10_alloc_compressor_data(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+
+  vp10_alloc_context_buffers(cm, cm->width, cm->height);
+
+  alloc_context_buffers_ext(cpi);
+
+  vpx_free(cpi->tile_tok[0][0]);
+
+  {
+    unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
+    CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
+                    vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+#if CONFIG_ANS
+    vp10_buf_ans_alloc(&cpi->buf_ans, cm, tokens);
+#endif  // CONFIG_ANS
+  }
+
+  vp10_setup_pc_tree(&cpi->common, &cpi->td);
+}
+
+void vp10_new_framerate(VP10_COMP *cpi, double framerate) {
+  cpi->framerate = framerate < 0.1 ? 30 : framerate;
+  vp10_rc_update_framerate(cpi);
+}
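+
+// Framerates below 0.1 fps are treated as invalid and reset to 30 so
+// that downstream rate-control math dividing by the framerate stays
+// well behaved.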
+
+static void set_tile_info(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+
+#if CONFIG_EXT_TILE
+#if CONFIG_EXT_PARTITION
+  if (cpi->oxcf.superblock_size != VPX_SUPERBLOCK_SIZE_64X64) {
+    cm->tile_width = clamp(cpi->oxcf.tile_columns, 1, 32);
+    cm->tile_height = clamp(cpi->oxcf.tile_rows, 1, 32);
+    cm->tile_width <<= MAX_MIB_SIZE_LOG2;
+    cm->tile_height <<= MAX_MIB_SIZE_LOG2;
+  } else {
+    cm->tile_width = clamp(cpi->oxcf.tile_columns, 1, 64);
+    cm->tile_height = clamp(cpi->oxcf.tile_rows, 1, 64);
+    cm->tile_width <<= MAX_MIB_SIZE_LOG2 - 1;
+    cm->tile_height <<= MAX_MIB_SIZE_LOG2 - 1;
+  }
+#else
+  cm->tile_width = clamp(cpi->oxcf.tile_columns, 1, 64);
+  cm->tile_height = clamp(cpi->oxcf.tile_rows, 1, 64);
+  cm->tile_width <<= MAX_MIB_SIZE_LOG2;
+  cm->tile_height <<= MAX_MIB_SIZE_LOG2;
+#endif  // CONFIG_EXT_PARTITION
+
+  cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
+  cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+
+  // At most 32 superblocks in each tile dimension.
+  assert(cm->tile_width >> MAX_MIB_SIZE_LOG2 <= 32);
+  assert(cm->tile_height >> MAX_MIB_SIZE_LOG2 <= 32);
+
+  // Get the number of tiles
+  cm->tile_cols = 1;
+  while (cm->tile_cols * cm->tile_width < cm->mi_cols) ++cm->tile_cols;
+
+  cm->tile_rows = 1;
+  while (cm->tile_rows * cm->tile_height < cm->mi_rows) ++cm->tile_rows;
+#else
+  int min_log2_tile_cols, max_log2_tile_cols;
+  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+  cm->log2_tile_cols =
+      clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
+  cm->log2_tile_rows = cpi->oxcf.tile_rows;
+
+  cm->tile_cols = 1 << cm->log2_tile_cols;
+  cm->tile_rows = 1 << cm->log2_tile_rows;
+
+  cm->tile_width = ALIGN_POWER_OF_TWO(cm->mi_cols, MAX_MIB_SIZE_LOG2);
+  cm->tile_width >>= cm->log2_tile_cols;
+  cm->tile_height = ALIGN_POWER_OF_TWO(cm->mi_rows, MAX_MIB_SIZE_LOG2);
+  cm->tile_height >>= cm->log2_tile_rows;
+
+  // Round to integer multiples of the max superblock size.
+  cm->tile_width = ALIGN_POWER_OF_TWO(cm->tile_width, MAX_MIB_SIZE_LOG2);
+  cm->tile_height = ALIGN_POWER_OF_TWO(cm->tile_height, MAX_MIB_SIZE_LOG2);
+#endif  // CONFIG_EXT_TILE
+}
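+
+// Worked example for the non-EXT_TILE path, assuming 8x8 mi units and
+// MAX_MIB_SIZE_LOG2 == 3 (64x64 superblocks): a 1920-pixel-wide frame
+// gives mi_cols = 240. With log2_tile_cols = 1:
+//   tile_width = ALIGN_POWER_OF_TWO(240, 3) >> 1 = 120 mi
+//   re-align:    ALIGN_POWER_OF_TWO(120, 3)      = 120 mi = 15 superblocks
+// so each of the two tile columns spans 960 pixels.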
+
+static void update_frame_size(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+
+  vp10_set_mb_mi(cm, cm->width, cm->height);
+  vp10_init_context_buffers(cm);
+  vp10_init_macroblockd(cm, xd, NULL);
+  memset(cpi->mbmi_ext_base, 0,
+         cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
+
+  set_tile_info(cpi);
+}
+
+static void init_buffer_indices(VP10_COMP *cpi) {
+#if CONFIG_EXT_REFS
+  int fb_idx;
+  for (fb_idx = 0; fb_idx < LAST_REF_FRAMES; ++fb_idx)
+    cpi->lst_fb_idxes[fb_idx] = fb_idx;
+  cpi->gld_fb_idx = LAST_REF_FRAMES;
+  cpi->bwd_fb_idx = LAST_REF_FRAMES + 1;
+  cpi->alt_fb_idx = LAST_REF_FRAMES + 2;
+  for (fb_idx = 0; fb_idx < MAX_EXT_ARFS + 1; ++fb_idx)
+    cpi->arf_map[fb_idx] = LAST_REF_FRAMES + 2 + fb_idx;
+#else
+  cpi->lst_fb_idx = 0;
+  cpi->gld_fb_idx = 1;
+  cpi->alt_fb_idx = 2;
+#endif  // CONFIG_EXT_REFS
+}
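+
+// With CONFIG_EXT_REFS the virtual buffer indices form one flat range.
+// Assuming LAST_REF_FRAMES == 3, the assignments above produce:
+//   lst_fb_idxes = {0, 1, 2}    (LAST, LAST2, LAST3)
+//   gld_fb_idx   = 3
+//   bwd_fb_idx   = 4
+//   alt_fb_idx   = 5
+//   arf_map      = {5, 6, ...}  (ALTREF plus the extra ARF slots)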
+
+static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
+  VP10_COMMON *const cm = &cpi->common;
+
+  cpi->oxcf = *oxcf;
+  cpi->framerate = oxcf->init_framerate;
+
+  cm->profile = oxcf->profile;
+  cm->bit_depth = oxcf->bit_depth;
+#if CONFIG_VP9_HIGHBITDEPTH
+  cm->use_highbitdepth = oxcf->use_highbitdepth;
+#endif
+  cm->color_space = oxcf->color_space;
+  cm->color_range = oxcf->color_range;
+
+  cm->width = oxcf->width;
+  cm->height = oxcf->height;
+  vp10_alloc_compressor_data(cpi);
+
+  // Single thread case: use counts in common.
+  cpi->td.counts = &cm->counts;
+
+  // vp10_change_config() performs all setup shared between init and
+  // reconfiguration.
+  vp10_change_config(cpi, oxcf);
+
+  cpi->static_mb_pct = 0;
+  cpi->ref_frame_flags = 0;
+
+  init_buffer_indices(cpi);
+}
+
+static void set_rc_buffer_sizes(RATE_CONTROL *rc,
+                                const VP10EncoderConfig *oxcf) {
+  const int64_t bandwidth = oxcf->target_bandwidth;
+  const int64_t starting = oxcf->starting_buffer_level_ms;
+  const int64_t optimal = oxcf->optimal_buffer_level_ms;
+  const int64_t maximum = oxcf->maximum_buffer_size_ms;
+
+  rc->starting_buffer_level = starting * bandwidth / 1000;
+  rc->optimal_buffer_level =
+      (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
+  rc->maximum_buffer_size =
+      (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
+}
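+
+// Worked example: with target_bandwidth = 800000 bps and
+// starting_buffer_level_ms = 4000, the starting level is
+//   4000 * 800000 / 1000 = 3200000 bits (four seconds of payload).
+// A zero optimal/maximum setting falls back to bandwidth / 8, i.e.
+// 100000 bits, one eighth of a second at the target rate.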
+
+#if CONFIG_VP9_HIGHBITDEPTH
+#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
+  cpi->fn_ptr[BT].sdf = SDF;                                           \
+  cpi->fn_ptr[BT].sdaf = SDAF;                                         \
+  cpi->fn_ptr[BT].vf = VF;                                             \
+  cpi->fn_ptr[BT].svf = SVF;                                           \
+  cpi->fn_ptr[BT].svaf = SVAF;                                         \
+  cpi->fn_ptr[BT].sdx3f = SDX3F;                                       \
+  cpi->fn_ptr[BT].sdx8f = SDX8F;                                       \
+  cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
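+// HIGHBD_BFP fills one row of the per-block-size function-pointer
+// table. Once highbd_set_var_fns below has run, callers compute SAD
+// through the table rather than calling a width-specific function,
+// e.g. (illustrative call; real call sites are in the motion search):
+//   unsigned int sad =
+//       cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride, ref, ref_stride);
+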
+#define MAKE_BFP_SAD_WRAPPER(fnname)                                           \
+  static unsigned int fnname##_bits8(const uint8_t *src_ptr,                   \
+                                     int source_stride,                        \
+                                     const uint8_t *ref_ptr, int ref_stride) { \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride);                \
+  }                                                                            \
+  static unsigned int fnname##_bits10(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride) {                                                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2;           \
+  }                                                                            \
+  static unsigned int fnname##_bits12(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride) {                                                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4;           \
+  }
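+
+// The shifts renormalize SAD to an 8-bit scale: 10-bit samples make
+// each absolute difference up to 4x larger and 12-bit up to 16x, hence
+// >> 2 and >> 4. For example, a uniform error of one 8-bit step over a
+// 16x16 block gives SAD 256; the same relative error at 10 bits is
+// 4 per sample, SAD 1024, and 1024 >> 2 == 256 again.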
+
+#define MAKE_BFP_SADAVG_WRAPPER(fnname)                                        \
+  static unsigned int fnname##_bits8(                                          \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred);   \
+  }                                                                            \
+  static unsigned int fnname##_bits10(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+           2;                                                                  \
+  }                                                                            \
+  static unsigned int fnname##_bits12(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *second_pred) {                            \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+           4;                                                                  \
+  }
+
+#define MAKE_BFP_SAD3_WRAPPER(fnname)                                    \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
+                             const uint8_t *ref_ptr, int ref_stride,     \
+                             unsigned int *sad_array) {                  \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+  }                                                                      \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 3; i++) sad_array[i] >>= 2;                          \
+  }                                                                      \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 3; i++) sad_array[i] >>= 4;                          \
+  }
+
+#define MAKE_BFP_SAD8_WRAPPER(fnname)                                    \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,  \
+                             const uint8_t *ref_ptr, int ref_stride,     \
+                             unsigned int *sad_array) {                  \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+  }                                                                      \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 8; i++) sad_array[i] >>= 2;                          \
+  }                                                                      \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+                              const uint8_t *ref_ptr, int ref_stride,    \
+                              unsigned int *sad_array) {                 \
+    int i;                                                               \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);      \
+    for (i = 0; i < 8; i++) sad_array[i] >>= 4;                          \
+  }
+#define MAKE_BFP_SAD4D_WRAPPER(fnname)                                        \
+  static void fnname##_bits8(const uint8_t *src_ptr, int source_stride,       \
+                             const uint8_t *const ref_ptr[], int ref_stride,  \
+                             unsigned int *sad_array) {                       \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+  }                                                                           \
+  static void fnname##_bits10(const uint8_t *src_ptr, int source_stride,      \
+                              const uint8_t *const ref_ptr[], int ref_stride, \
+                              unsigned int *sad_array) {                      \
+    int i;                                                                    \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+    for (i = 0; i < 4; i++) sad_array[i] >>= 2;                               \
+  }                                                                           \
+  static void fnname##_bits12(const uint8_t *src_ptr, int source_stride,      \
+                              const uint8_t *const ref_ptr[], int ref_stride, \
+                              unsigned int *sad_array) {                      \
+    int i;                                                                    \
+    fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array);           \
+    for (i = 0; i < 4; i++) sad_array[i] >>= 4;                               \
+  }
+
+#if CONFIG_EXT_PARTITION
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x128)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x128_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad128x128x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad128x128x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x128x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad128x64)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad128x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad128x64x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x128)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x128_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x128x4d)
+#endif  // CONFIG_EXT_PARTITION
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad32x32x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad32x32x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad64x64x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad64x64x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x16x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x16x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x8x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x8x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x16x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x16x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x8x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x8x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x4x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x8x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d)
+MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4)
+MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg)
+MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
+MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
+MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
+
+#if CONFIG_EXT_INTER
+#define HIGHBD_MBFP(BT, MSDF, MVF, MSVF) \
+  cpi->fn_ptr[BT].msdf = MSDF;           \
+  cpi->fn_ptr[BT].mvf = MVF;             \
+  cpi->fn_ptr[BT].msvf = MSVF;
+
+#define MAKE_MBFP_SAD_WRAPPER(fnname)                                          \
+  static unsigned int fnname##_bits8(                                          \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *m, int m_stride) {                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, m, m_stride);   \
+  }                                                                            \
+  static unsigned int fnname##_bits10(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *m, int m_stride) {                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, m, m_stride) >> \
+           2;                                                                  \
+  }                                                                            \
+  static unsigned int fnname##_bits12(                                         \
+      const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr,       \
+      int ref_stride, const uint8_t *m, int m_stride) {                        \
+    return fnname(src_ptr, source_stride, ref_ptr, ref_stride, m, m_stride) >> \
+           4;                                                                  \
+  }
+
+#if CONFIG_EXT_PARTITION
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x128)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad128x64)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x128)
+#endif  // CONFIG_EXT_PARTITION
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x64)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad64x32)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x64)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x32)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad32x16)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x32)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x16)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad16x8)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x16)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x8)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad8x4)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x8)
+MAKE_MBFP_SAD_WRAPPER(vpx_highbd_masked_sad4x4)
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC
+#define HIGHBD_OBFP(BT, OSDF, OVF, OSVF) \
+  cpi->fn_ptr[BT].osdf = OSDF;           \
+  cpi->fn_ptr[BT].ovf = OVF;             \
+  cpi->fn_ptr[BT].osvf = OSVF;
+
+#define MAKE_OBFP_SAD_WRAPPER(fnname)                                     \
+  static unsigned int fnname##_bits8(const uint8_t *ref, int ref_stride,  \
+                                     const int32_t *wsrc,                 \
+                                     const int32_t *msk) {                \
+    return fnname(ref, ref_stride, wsrc, msk);                            \
+  }                                                                       \
+  static unsigned int fnname##_bits10(const uint8_t *ref, int ref_stride, \
+                                      const int32_t *wsrc,                \
+                                      const int32_t *msk) {               \
+    return fnname(ref, ref_stride, wsrc, msk) >> 2;                       \
+  }                                                                       \
+  static unsigned int fnname##_bits12(const uint8_t *ref, int ref_stride, \
+                                      const int32_t *wsrc,                \
+                                      const int32_t *msk) {               \
+    return fnname(ref, ref_stride, wsrc, msk) >> 4;                       \
+  }
+
+#if CONFIG_EXT_PARTITION
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x128)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad128x64)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x128)
+#endif  // CONFIG_EXT_PARTITION
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x64)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad64x32)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x64)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x32)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad32x16)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x32)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x16)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad16x8)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x16)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x8)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad8x4)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x8)
+MAKE_OBFP_SAD_WRAPPER(vpx_highbd_obmc_sad4x4)
+#endif  // CONFIG_OBMC
+
+static void highbd_set_var_fns(VP10_COMP *const cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  if (cm->use_highbitdepth) {
+    switch (cm->bit_depth) {
+      case VPX_BITS_8:
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
+                   vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
+                   vpx_highbd_8_sub_pixel_variance32x16,
+                   vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
+                   vpx_highbd_sad32x16x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
+                   vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
+                   vpx_highbd_8_sub_pixel_variance16x32,
+                   vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
+                   vpx_highbd_sad16x32x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
+                   vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
+                   vpx_highbd_8_sub_pixel_variance64x32,
+                   vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
+                   vpx_highbd_sad64x32x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
+                   vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
+                   vpx_highbd_8_sub_pixel_variance32x64,
+                   vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
+                   vpx_highbd_sad32x64x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
+                   vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
+                   vpx_highbd_8_sub_pixel_variance32x32,
+                   vpx_highbd_8_sub_pixel_avg_variance32x32,
+                   vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
+                   vpx_highbd_sad32x32x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
+                   vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
+                   vpx_highbd_8_sub_pixel_variance64x64,
+                   vpx_highbd_8_sub_pixel_avg_variance64x64,
+                   vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
+                   vpx_highbd_sad64x64x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
+                   vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
+                   vpx_highbd_8_sub_pixel_variance16x16,
+                   vpx_highbd_8_sub_pixel_avg_variance16x16,
+                   vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
+                   vpx_highbd_sad16x16x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
+            vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
+            vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
+            vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
+            vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
+            vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
+            vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
+            vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
+            vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
+            vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
+                   vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
+                   vpx_highbd_8_sub_pixel_variance8x4,
+                   vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
+                   vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
+                   vpx_highbd_8_sub_pixel_variance4x8,
+                   vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
+            vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
+            vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
+            vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
+
+#if CONFIG_EXT_PARTITION
+        HIGHBD_BFP(BLOCK_128X128, vpx_highbd_sad128x128_bits8,
+                   vpx_highbd_sad128x128_avg_bits8,
+                   vpx_highbd_8_variance128x128,
+                   vpx_highbd_8_sub_pixel_variance128x128,
+                   vpx_highbd_8_sub_pixel_avg_variance128x128,
+                   vpx_highbd_sad128x128x3_bits8, vpx_highbd_sad128x128x8_bits8,
+                   vpx_highbd_sad128x128x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits8,
+                   vpx_highbd_sad128x64_avg_bits8, vpx_highbd_8_variance128x64,
+                   vpx_highbd_8_sub_pixel_variance128x64,
+                   vpx_highbd_8_sub_pixel_avg_variance128x64, NULL, NULL,
+                   vpx_highbd_sad128x64x4d_bits8)
+
+        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits8,
+                   vpx_highbd_sad64x128_avg_bits8, vpx_highbd_8_variance64x128,
+                   vpx_highbd_8_sub_pixel_variance64x128,
+                   vpx_highbd_8_sub_pixel_avg_variance64x128, NULL, NULL,
+                   vpx_highbd_sad64x128x4d_bits8)
+#endif  // CONFIG_EXT_PARTITION
+
+#if CONFIG_EXT_INTER
+#if CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits8,
+                    vpx_highbd_masked_variance128x128,
+                    vpx_highbd_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits8,
+                    vpx_highbd_masked_variance128x64,
+                    vpx_highbd_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits8,
+                    vpx_highbd_masked_variance64x128,
+                    vpx_highbd_masked_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits8,
+                    vpx_highbd_masked_variance64x64,
+                    vpx_highbd_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits8,
+                    vpx_highbd_masked_variance64x32,
+                    vpx_highbd_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits8,
+                    vpx_highbd_masked_variance32x64,
+                    vpx_highbd_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits8,
+                    vpx_highbd_masked_variance32x32,
+                    vpx_highbd_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits8,
+                    vpx_highbd_masked_variance32x16,
+                    vpx_highbd_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits8,
+                    vpx_highbd_masked_variance16x32,
+                    vpx_highbd_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits8,
+                    vpx_highbd_masked_variance16x16,
+                    vpx_highbd_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits8,
+                    vpx_highbd_masked_variance8x16,
+                    vpx_highbd_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits8,
+                    vpx_highbd_masked_variance16x8,
+                    vpx_highbd_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits8,
+                    vpx_highbd_masked_variance8x8,
+                    vpx_highbd_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits8,
+                    vpx_highbd_masked_variance4x8,
+                    vpx_highbd_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits8,
+                    vpx_highbd_masked_variance8x4,
+                    vpx_highbd_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits8,
+                    vpx_highbd_masked_variance4x4,
+                    vpx_highbd_masked_sub_pixel_variance4x4)
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC
+#if CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits8,
+                    vpx_highbd_obmc_variance128x128,
+                    vpx_highbd_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits8,
+                    vpx_highbd_obmc_variance128x64,
+                    vpx_highbd_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits8,
+                    vpx_highbd_obmc_variance64x128,
+                    vpx_highbd_obmc_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits8,
+                    vpx_highbd_obmc_variance64x64,
+                    vpx_highbd_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits8,
+                    vpx_highbd_obmc_variance64x32,
+                    vpx_highbd_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits8,
+                    vpx_highbd_obmc_variance32x64,
+                    vpx_highbd_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits8,
+                    vpx_highbd_obmc_variance32x32,
+                    vpx_highbd_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits8,
+                    vpx_highbd_obmc_variance32x16,
+                    vpx_highbd_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits8,
+                    vpx_highbd_obmc_variance16x32,
+                    vpx_highbd_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits8,
+                    vpx_highbd_obmc_variance16x16,
+                    vpx_highbd_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits8,
+                    vpx_highbd_obmc_variance8x16,
+                    vpx_highbd_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits8,
+                    vpx_highbd_obmc_variance16x8,
+                    vpx_highbd_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits8,
+                    vpx_highbd_obmc_variance8x8,
+                    vpx_highbd_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits8,
+                    vpx_highbd_obmc_variance4x8,
+                    vpx_highbd_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits8,
+                    vpx_highbd_obmc_variance8x4,
+                    vpx_highbd_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits8,
+                    vpx_highbd_obmc_variance4x4,
+                    vpx_highbd_obmc_sub_pixel_variance4x4)
+#endif  // CONFIG_OBMC
+        break;
+
+      case VPX_BITS_10:
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
+                   vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
+                   vpx_highbd_10_sub_pixel_variance32x16,
+                   vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
+                   vpx_highbd_sad32x16x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
+                   vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
+                   vpx_highbd_10_sub_pixel_variance16x32,
+                   vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
+                   vpx_highbd_sad16x32x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
+                   vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
+                   vpx_highbd_10_sub_pixel_variance64x32,
+                   vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
+                   vpx_highbd_sad64x32x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
+                   vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
+                   vpx_highbd_10_sub_pixel_variance32x64,
+                   vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
+                   vpx_highbd_sad32x64x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
+                   vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
+                   vpx_highbd_10_sub_pixel_variance32x32,
+                   vpx_highbd_10_sub_pixel_avg_variance32x32,
+                   vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
+                   vpx_highbd_sad32x32x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
+                   vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
+                   vpx_highbd_10_sub_pixel_variance64x64,
+                   vpx_highbd_10_sub_pixel_avg_variance64x64,
+                   vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
+                   vpx_highbd_sad64x64x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
+                   vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
+                   vpx_highbd_10_sub_pixel_variance16x16,
+                   vpx_highbd_10_sub_pixel_avg_variance16x16,
+                   vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
+                   vpx_highbd_sad16x16x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
+                   vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
+                   vpx_highbd_10_sub_pixel_variance16x8,
+                   vpx_highbd_10_sub_pixel_avg_variance16x8,
+                   vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
+                   vpx_highbd_sad16x8x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
+                   vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
+                   vpx_highbd_10_sub_pixel_variance8x16,
+                   vpx_highbd_10_sub_pixel_avg_variance8x16,
+                   vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
+                   vpx_highbd_sad8x16x4d_bits10)
+
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
+            vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
+            vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
+            vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
+                   vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
+                   vpx_highbd_10_sub_pixel_variance8x4,
+                   vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
+                   vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
+                   vpx_highbd_10_sub_pixel_variance4x8,
+                   vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
+            vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
+            vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
+            vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
+
+#if CONFIG_EXT_PARTITION
+        HIGHBD_BFP(
+            BLOCK_128X128, vpx_highbd_sad128x128_bits10,
+            vpx_highbd_sad128x128_avg_bits10, vpx_highbd_10_variance128x128,
+            vpx_highbd_10_sub_pixel_variance128x128,
+            vpx_highbd_10_sub_pixel_avg_variance128x128,
+            vpx_highbd_sad128x128x3_bits10, vpx_highbd_sad128x128x8_bits10,
+            vpx_highbd_sad128x128x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits10,
+                   vpx_highbd_sad128x64_avg_bits10,
+                   vpx_highbd_10_variance128x64,
+                   vpx_highbd_10_sub_pixel_variance128x64,
+                   vpx_highbd_10_sub_pixel_avg_variance128x64, NULL, NULL,
+                   vpx_highbd_sad128x64x4d_bits10)
+
+        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits10,
+                   vpx_highbd_sad64x128_avg_bits10,
+                   vpx_highbd_10_variance64x128,
+                   vpx_highbd_10_sub_pixel_variance64x128,
+                   vpx_highbd_10_sub_pixel_avg_variance64x128, NULL, NULL,
+                   vpx_highbd_sad64x128x4d_bits10)
+#endif  // CONFIG_EXT_PARTITION
+
+#if CONFIG_EXT_INTER
+#if CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits10,
+                    vpx_highbd_10_masked_variance128x128,
+                    vpx_highbd_10_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits10,
+                    vpx_highbd_10_masked_variance128x64,
+                    vpx_highbd_10_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits10,
+                    vpx_highbd_10_masked_variance64x128,
+                    vpx_highbd_10_masked_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits10,
+                    vpx_highbd_10_masked_variance64x64,
+                    vpx_highbd_10_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits10,
+                    vpx_highbd_10_masked_variance64x32,
+                    vpx_highbd_10_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits10,
+                    vpx_highbd_10_masked_variance32x64,
+                    vpx_highbd_10_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits10,
+                    vpx_highbd_10_masked_variance32x32,
+                    vpx_highbd_10_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits10,
+                    vpx_highbd_10_masked_variance32x16,
+                    vpx_highbd_10_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits10,
+                    vpx_highbd_10_masked_variance16x32,
+                    vpx_highbd_10_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits10,
+                    vpx_highbd_10_masked_variance16x16,
+                    vpx_highbd_10_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits10,
+                    vpx_highbd_10_masked_variance8x16,
+                    vpx_highbd_10_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits10,
+                    vpx_highbd_10_masked_variance16x8,
+                    vpx_highbd_10_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits10,
+                    vpx_highbd_10_masked_variance8x8,
+                    vpx_highbd_10_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits10,
+                    vpx_highbd_10_masked_variance4x8,
+                    vpx_highbd_10_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits10,
+                    vpx_highbd_10_masked_variance8x4,
+                    vpx_highbd_10_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits10,
+                    vpx_highbd_10_masked_variance4x4,
+                    vpx_highbd_10_masked_sub_pixel_variance4x4)
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC
+#if CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits10,
+                    vpx_highbd_10_obmc_variance128x128,
+                    vpx_highbd_10_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits10,
+                    vpx_highbd_10_obmc_variance128x64,
+                    vpx_highbd_10_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits10,
+                    vpx_highbd_10_obmc_variance64x128,
+                    vpx_highbd_10_obmc_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits10,
+                    vpx_highbd_10_obmc_variance64x64,
+                    vpx_highbd_10_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits10,
+                    vpx_highbd_10_obmc_variance64x32,
+                    vpx_highbd_10_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits10,
+                    vpx_highbd_10_obmc_variance32x64,
+                    vpx_highbd_10_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits10,
+                    vpx_highbd_10_obmc_variance32x32,
+                    vpx_highbd_10_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits10,
+                    vpx_highbd_10_obmc_variance32x16,
+                    vpx_highbd_10_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits10,
+                    vpx_highbd_10_obmc_variance16x32,
+                    vpx_highbd_10_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits10,
+                    vpx_highbd_10_obmc_variance16x16,
+                    vpx_highbd_10_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits10,
+                    vpx_highbd_10_obmc_variance8x16,
+                    vpx_highbd_10_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits10,
+                    vpx_highbd_10_obmc_variance16x8,
+                    vpx_highbd_10_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits10,
+                    vpx_highbd_10_obmc_variance8x8,
+                    vpx_highbd_10_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits10,
+                    vpx_highbd_10_obmc_variance4x8,
+                    vpx_highbd_10_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits10,
+                    vpx_highbd_10_obmc_variance8x4,
+                    vpx_highbd_10_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits10,
+                    vpx_highbd_10_obmc_variance4x4,
+                    vpx_highbd_10_obmc_sub_pixel_variance4x4)
+#endif  // CONFIG_OBMC
+        break;
+
+      case VPX_BITS_12:
+        HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
+                   vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
+                   vpx_highbd_12_sub_pixel_variance32x16,
+                   vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
+                   vpx_highbd_sad32x16x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
+                   vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
+                   vpx_highbd_12_sub_pixel_variance16x32,
+                   vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
+                   vpx_highbd_sad16x32x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
+                   vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
+                   vpx_highbd_12_sub_pixel_variance64x32,
+                   vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
+                   vpx_highbd_sad64x32x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
+                   vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
+                   vpx_highbd_12_sub_pixel_variance32x64,
+                   vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
+                   vpx_highbd_sad32x64x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
+                   vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
+                   vpx_highbd_12_sub_pixel_variance32x32,
+                   vpx_highbd_12_sub_pixel_avg_variance32x32,
+                   vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
+                   vpx_highbd_sad32x32x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
+                   vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
+                   vpx_highbd_12_sub_pixel_variance64x64,
+                   vpx_highbd_12_sub_pixel_avg_variance64x64,
+                   vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
+                   vpx_highbd_sad64x64x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
+                   vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
+                   vpx_highbd_12_sub_pixel_variance16x16,
+                   vpx_highbd_12_sub_pixel_avg_variance16x16,
+                   vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
+                   vpx_highbd_sad16x16x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
+                   vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
+                   vpx_highbd_12_sub_pixel_variance16x8,
+                   vpx_highbd_12_sub_pixel_avg_variance16x8,
+                   vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
+                   vpx_highbd_sad16x8x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
+                   vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
+                   vpx_highbd_12_sub_pixel_variance8x16,
+                   vpx_highbd_12_sub_pixel_avg_variance8x16,
+                   vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
+                   vpx_highbd_sad8x16x4d_bits12)
+
+        HIGHBD_BFP(
+            BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
+            vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
+            vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
+            vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
+                   vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
+                   vpx_highbd_12_sub_pixel_variance8x4,
+                   vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
+                   vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
+                   vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
+                   vpx_highbd_12_sub_pixel_variance4x8,
+                   vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
+                   vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
+
+        HIGHBD_BFP(
+            BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
+            vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
+            vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
+            vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
+
+#if CONFIG_EXT_PARTITION
+        HIGHBD_BFP(
+            BLOCK_128X128, vpx_highbd_sad128x128_bits12,
+            vpx_highbd_sad128x128_avg_bits12, vpx_highbd_12_variance128x128,
+            vpx_highbd_12_sub_pixel_variance128x128,
+            vpx_highbd_12_sub_pixel_avg_variance128x128,
+            vpx_highbd_sad128x128x3_bits12, vpx_highbd_sad128x128x8_bits12,
+            vpx_highbd_sad128x128x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_128X64, vpx_highbd_sad128x64_bits12,
+                   vpx_highbd_sad128x64_avg_bits12,
+                   vpx_highbd_12_variance128x64,
+                   vpx_highbd_12_sub_pixel_variance128x64,
+                   vpx_highbd_12_sub_pixel_avg_variance128x64, NULL, NULL,
+                   vpx_highbd_sad128x64x4d_bits12)
+
+        HIGHBD_BFP(BLOCK_64X128, vpx_highbd_sad64x128_bits12,
+                   vpx_highbd_sad64x128_avg_bits12,
+                   vpx_highbd_12_variance64x128,
+                   vpx_highbd_12_sub_pixel_variance64x128,
+                   vpx_highbd_12_sub_pixel_avg_variance64x128, NULL, NULL,
+                   vpx_highbd_sad64x128x4d_bits12)
+#endif  // CONFIG_EXT_PARTITION
+
+#if CONFIG_EXT_INTER
+#if CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_128X128, vpx_highbd_masked_sad128x128_bits12,
+                    vpx_highbd_12_masked_variance128x128,
+                    vpx_highbd_12_masked_sub_pixel_variance128x128)
+        HIGHBD_MBFP(BLOCK_128X64, vpx_highbd_masked_sad128x64_bits12,
+                    vpx_highbd_12_masked_variance128x64,
+                    vpx_highbd_12_masked_sub_pixel_variance128x64)
+        HIGHBD_MBFP(BLOCK_64X128, vpx_highbd_masked_sad64x128_bits12,
+                    vpx_highbd_12_masked_variance64x128,
+                    vpx_highbd_12_masked_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_MBFP(BLOCK_64X64, vpx_highbd_masked_sad64x64_bits12,
+                    vpx_highbd_12_masked_variance64x64,
+                    vpx_highbd_12_masked_sub_pixel_variance64x64)
+        HIGHBD_MBFP(BLOCK_64X32, vpx_highbd_masked_sad64x32_bits12,
+                    vpx_highbd_12_masked_variance64x32,
+                    vpx_highbd_12_masked_sub_pixel_variance64x32)
+        HIGHBD_MBFP(BLOCK_32X64, vpx_highbd_masked_sad32x64_bits12,
+                    vpx_highbd_12_masked_variance32x64,
+                    vpx_highbd_12_masked_sub_pixel_variance32x64)
+        HIGHBD_MBFP(BLOCK_32X32, vpx_highbd_masked_sad32x32_bits12,
+                    vpx_highbd_12_masked_variance32x32,
+                    vpx_highbd_12_masked_sub_pixel_variance32x32)
+        HIGHBD_MBFP(BLOCK_32X16, vpx_highbd_masked_sad32x16_bits12,
+                    vpx_highbd_12_masked_variance32x16,
+                    vpx_highbd_12_masked_sub_pixel_variance32x16)
+        HIGHBD_MBFP(BLOCK_16X32, vpx_highbd_masked_sad16x32_bits12,
+                    vpx_highbd_12_masked_variance16x32,
+                    vpx_highbd_12_masked_sub_pixel_variance16x32)
+        HIGHBD_MBFP(BLOCK_16X16, vpx_highbd_masked_sad16x16_bits12,
+                    vpx_highbd_12_masked_variance16x16,
+                    vpx_highbd_12_masked_sub_pixel_variance16x16)
+        HIGHBD_MBFP(BLOCK_8X16, vpx_highbd_masked_sad8x16_bits12,
+                    vpx_highbd_12_masked_variance8x16,
+                    vpx_highbd_12_masked_sub_pixel_variance8x16)
+        HIGHBD_MBFP(BLOCK_16X8, vpx_highbd_masked_sad16x8_bits12,
+                    vpx_highbd_12_masked_variance16x8,
+                    vpx_highbd_12_masked_sub_pixel_variance16x8)
+        HIGHBD_MBFP(BLOCK_8X8, vpx_highbd_masked_sad8x8_bits12,
+                    vpx_highbd_12_masked_variance8x8,
+                    vpx_highbd_12_masked_sub_pixel_variance8x8)
+        HIGHBD_MBFP(BLOCK_4X8, vpx_highbd_masked_sad4x8_bits12,
+                    vpx_highbd_12_masked_variance4x8,
+                    vpx_highbd_12_masked_sub_pixel_variance4x8)
+        HIGHBD_MBFP(BLOCK_8X4, vpx_highbd_masked_sad8x4_bits12,
+                    vpx_highbd_12_masked_variance8x4,
+                    vpx_highbd_12_masked_sub_pixel_variance8x4)
+        HIGHBD_MBFP(BLOCK_4X4, vpx_highbd_masked_sad4x4_bits12,
+                    vpx_highbd_12_masked_variance4x4,
+                    vpx_highbd_12_masked_sub_pixel_variance4x4)
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC
+#if CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_128X128, vpx_highbd_obmc_sad128x128_bits12,
+                    vpx_highbd_12_obmc_variance128x128,
+                    vpx_highbd_12_obmc_sub_pixel_variance128x128)
+        HIGHBD_OBFP(BLOCK_128X64, vpx_highbd_obmc_sad128x64_bits12,
+                    vpx_highbd_12_obmc_variance128x64,
+                    vpx_highbd_12_obmc_sub_pixel_variance128x64)
+        HIGHBD_OBFP(BLOCK_64X128, vpx_highbd_obmc_sad64x128_bits12,
+                    vpx_highbd_12_obmc_variance64x128,
+                    vpx_highbd_12_obmc_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+        HIGHBD_OBFP(BLOCK_64X64, vpx_highbd_obmc_sad64x64_bits12,
+                    vpx_highbd_12_obmc_variance64x64,
+                    vpx_highbd_12_obmc_sub_pixel_variance64x64)
+        HIGHBD_OBFP(BLOCK_64X32, vpx_highbd_obmc_sad64x32_bits12,
+                    vpx_highbd_12_obmc_variance64x32,
+                    vpx_highbd_12_obmc_sub_pixel_variance64x32)
+        HIGHBD_OBFP(BLOCK_32X64, vpx_highbd_obmc_sad32x64_bits12,
+                    vpx_highbd_12_obmc_variance32x64,
+                    vpx_highbd_12_obmc_sub_pixel_variance32x64)
+        HIGHBD_OBFP(BLOCK_32X32, vpx_highbd_obmc_sad32x32_bits12,
+                    vpx_highbd_12_obmc_variance32x32,
+                    vpx_highbd_12_obmc_sub_pixel_variance32x32)
+        HIGHBD_OBFP(BLOCK_32X16, vpx_highbd_obmc_sad32x16_bits12,
+                    vpx_highbd_12_obmc_variance32x16,
+                    vpx_highbd_12_obmc_sub_pixel_variance32x16)
+        HIGHBD_OBFP(BLOCK_16X32, vpx_highbd_obmc_sad16x32_bits12,
+                    vpx_highbd_12_obmc_variance16x32,
+                    vpx_highbd_12_obmc_sub_pixel_variance16x32)
+        HIGHBD_OBFP(BLOCK_16X16, vpx_highbd_obmc_sad16x16_bits12,
+                    vpx_highbd_12_obmc_variance16x16,
+                    vpx_highbd_12_obmc_sub_pixel_variance16x16)
+        HIGHBD_OBFP(BLOCK_8X16, vpx_highbd_obmc_sad8x16_bits12,
+                    vpx_highbd_12_obmc_variance8x16,
+                    vpx_highbd_12_obmc_sub_pixel_variance8x16)
+        HIGHBD_OBFP(BLOCK_16X8, vpx_highbd_obmc_sad16x8_bits12,
+                    vpx_highbd_12_obmc_variance16x8,
+                    vpx_highbd_12_obmc_sub_pixel_variance16x8)
+        HIGHBD_OBFP(BLOCK_8X8, vpx_highbd_obmc_sad8x8_bits12,
+                    vpx_highbd_12_obmc_variance8x8,
+                    vpx_highbd_12_obmc_sub_pixel_variance8x8)
+        HIGHBD_OBFP(BLOCK_4X8, vpx_highbd_obmc_sad4x8_bits12,
+                    vpx_highbd_12_obmc_variance4x8,
+                    vpx_highbd_12_obmc_sub_pixel_variance4x8)
+        HIGHBD_OBFP(BLOCK_8X4, vpx_highbd_obmc_sad8x4_bits12,
+                    vpx_highbd_12_obmc_variance8x4,
+                    vpx_highbd_12_obmc_sub_pixel_variance8x4)
+        HIGHBD_OBFP(BLOCK_4X4, vpx_highbd_obmc_sad4x4_bits12,
+                    vpx_highbd_12_obmc_variance4x4,
+                    vpx_highbd_12_obmc_sub_pixel_variance4x4)
+#endif  // CONFIG_OBMC
+        break;
+
+      default:
+        assert(0 &&
+               "cm->bit_depth should be VPX_BITS_8, "
+               "VPX_BITS_10 or VPX_BITS_12");
+    }
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
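+// The *_bits10 / *_bits12 pointers installed above are thin wrappers, defined
+// earlier in this file, that scale the raw high-bit-depth SAD back toward the
+// 8-bit range, presumably so that motion-search thresholds stay comparable
+// across bit depths.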
+
+static void realloc_segmentation_maps(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+
+  // Create the encoder segmentation map and set all entries to 0
+  vpx_free(cpi->segmentation_map);
+  CHECK_MEM_ERROR(cm, cpi->segmentation_map,
+                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+
+  // Create a map used for cyclic background refresh.
+  if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
+                  vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
+
+  // Create a map used to mark inactive areas.
+  vpx_free(cpi->active_map.map);
+  CHECK_MEM_ERROR(cm, cpi->active_map.map,
+                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+
+  // And a placeholder structure in the coding context,
+  // for use if we want to save and restore it.
+  vpx_free(cpi->coding_context.last_frame_seg_map_copy);
+  CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy,
+                  vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
+}
+
+void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
+  cm->bit_depth = oxcf->bit_depth;
+  cm->color_space = oxcf->color_space;
+  cm->color_range = oxcf->color_range;
+
+  if (cm->profile <= PROFILE_1)
+    assert(cm->bit_depth == VPX_BITS_8);
+  else
+    assert(cm->bit_depth > VPX_BITS_8);
+
+  cpi->oxcf = *oxcf;
+#if CONFIG_VP9_HIGHBITDEPTH
+  cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_GLOBAL_MOTION
+  cpi->td.mb.e_mbd.global_motion = cm->global_motion;
+#endif  // CONFIG_GLOBAL_MOTION
+
+  if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+    rc->baseline_gf_interval = FIXED_GF_INTERVAL;
+  } else {
+    rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2;
+  }
+
+  cpi->refresh_last_frame = 1;
+  cpi->refresh_golden_frame = 0;
+#if CONFIG_EXT_REFS
+  cpi->refresh_bwd_ref_frame = 0;
+#endif  // CONFIG_EXT_REFS
+
+  cm->refresh_frame_context =
+      (oxcf->error_resilient_mode || oxcf->frame_parallel_decoding_mode)
+          ? REFRESH_FRAME_CONTEXT_FORWARD
+          : REFRESH_FRAME_CONTEXT_BACKWARD;
+  cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
+
+  cm->allow_screen_content_tools = (cpi->oxcf.content == VPX_CONTENT_SCREEN);
+  if (cm->allow_screen_content_tools) {
+    MACROBLOCK *x = &cpi->td.mb;
+    if (x->palette_buffer == 0) {
+      CHECK_MEM_ERROR(cm, x->palette_buffer,
+                      vpx_memalign(16, sizeof(*x->palette_buffer)));
+    }
+    // Reallocate the pc_tree, as its contents depend on
+    // the state of cm->allow_screen_content_tools.
+    vp10_free_pc_tree(&cpi->td);
+    vp10_setup_pc_tree(&cpi->common, &cpi->td);
+  }
+
+  vp10_reset_segment_features(cm);
+  vp10_set_high_precision_mv(cpi, 0);
+
+  {
+    int i;
+
+    for (i = 0; i < MAX_SEGMENTS; i++)
+      cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
+  }
+  cpi->encode_breakout = cpi->oxcf.encode_breakout;
+
+  set_rc_buffer_sizes(rc, &cpi->oxcf);
+
+  // Under a configuration change, where maximum_buffer_size may change,
+  // keep buffer level clipped to the maximum allowed buffer size.
+  rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
+  rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
+
+  // Set up frame rate and related parameters rate control values.
+  vp10_new_framerate(cpi, cpi->framerate);
+
+  // Set absolute upper and lower quality limits
+  rc->worst_quality = cpi->oxcf.worst_allowed_q;
+  rc->best_quality = cpi->oxcf.best_allowed_q;
+
+  cm->interp_filter = cpi->sf.default_interp_filter;
+
+  if (cpi->oxcf.render_width > 0 && cpi->oxcf.render_height > 0) {
+    cm->render_width = cpi->oxcf.render_width;
+    cm->render_height = cpi->oxcf.render_height;
+  } else {
+    cm->render_width = cpi->oxcf.width;
+    cm->render_height = cpi->oxcf.height;
+  }
+  cm->width = cpi->oxcf.width;
+  cm->height = cpi->oxcf.height;
+
+  if (cpi->initial_width) {
+    if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
+      vp10_free_context_buffers(cm);
+      vp10_alloc_compressor_data(cpi);
+      realloc_segmentation_maps(cpi);
+      cpi->initial_width = cpi->initial_height = 0;
+    }
+  }
+  update_frame_size(cpi);
+
+  cpi->alt_ref_source = NULL;
+  rc->is_src_frame_alt_ref = 0;
+
+#if CONFIG_EXT_REFS
+  rc->is_bwd_ref_frame = 0;
+  rc->is_last_bipred_frame = 0;
+  rc->is_bipred_frame = 0;
+#endif  // CONFIG_EXT_REFS
+
+#if 0
+  // Experimental RD Code
+  cpi->frame_distortion = 0;
+  cpi->last_frame_distortion = 0;
+#endif
+
+  set_tile_info(cpi);
+
+  cpi->ext_refresh_frame_flags_pending = 0;
+  cpi->ext_refresh_frame_context_pending = 0;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  highbd_set_var_fns(cpi);
+#endif
+}
+
+#ifndef M_LOG2_E
+#define M_LOG2_E 0.693147180559945309417
+#endif
+#define log2f(x) (log(x) / (float)M_LOG2_E)
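+// Note: despite its name, the M_LOG2_E fallback above holds ln(2) (the value
+// <math.h> exposes as M_LN2), so log(x) / M_LOG2_E evaluates to log2(x),
+// e.g. log2f(8.0) == log(8.0) / 0.693147... == 3.0.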
+
+#if !CONFIG_REF_MV
+static void cal_nmvjointsadcost(int *mvjointsadcost) {
+  mvjointsadcost[0] = 600;
+  mvjointsadcost[1] = 300;
+  mvjointsadcost[2] = 300;
+  mvjointsadcost[3] = 300;
+}
+#endif
+
+static void cal_nmvsadcosts(int *mvsadcost[2]) {
+  int i = 1;
+
+  mvsadcost[0][0] = 0;
+  mvsadcost[1][0] = 0;
+
+  do {
+    double z = 256 * (2 * (log2f(8 * i) + .6));
+    mvsadcost[0][i] = (int)z;
+    mvsadcost[1][i] = (int)z;
+    mvsadcost[0][-i] = (int)z;
+    mvsadcost[1][-i] = (int)z;
+  } while (++i <= MV_MAX);
+}
+
+static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
+  int i = 1;
+
+  mvsadcost[0][0] = 0;
+  mvsadcost[1][0] = 0;
+
+  do {
+    double z = 256 * (2 * (log2f(8 * i) + .6));
+    mvsadcost[0][i] = (int)z;
+    mvsadcost[1][i] = (int)z;
+    mvsadcost[0][-i] = (int)z;
+    mvsadcost[1][-i] = (int)z;
+  } while (++i <= MV_MAX);
+}
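+// Both tables above follow the same logarithmic curve: for an MV component of
+// magnitude i, cost = (int)(256 * 2 * (log2(8 * i) + 0.6)). For example,
+// i == 1 yields (int)(512 * 3.6) = 1843, and each doubling of i adds 512.
+// The entries are mirrored for -i and +i.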
+
+static INLINE void init_upsampled_ref_frame_bufs(VP10_COMP *cpi) {
+  int i;
+
+  for (i = 0; i < (REF_FRAMES + 1); ++i) {
+    cpi->upsampled_ref_bufs[i].ref_count = 0;
+    cpi->upsampled_ref_idx[i] = INVALID_IDX;
+  }
+}
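+// Note: there are REF_FRAMES + 1 up-sampled buffer slots, presumably so that
+// a newly up-sampled frame can always be stored even while every one of the
+// REF_FRAMES reference slots still holds a live buffer.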
+
+VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+                                  BufferPool *const pool) {
+  unsigned int i;
+  VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP));
+  VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
+
+  if (!cm) return NULL;
+
+  vp10_zero(*cpi);
+
+  if (setjmp(cm->error.jmp)) {
+    cm->error.setjmp = 0;
+    vp10_remove_compressor(cpi);
+    return 0;
+  }
+
+  cm->error.setjmp = 1;
+  cm->alloc_mi = vp10_enc_alloc_mi;
+  cm->free_mi = vp10_enc_free_mi;
+  cm->setup_mi = vp10_enc_setup_mi;
+
+  CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+  CHECK_MEM_ERROR(
+      cm, cm->frame_contexts,
+      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+
+  cpi->resize_state = 0;
+  cpi->resize_avg_qp = 0;
+  cpi->resize_buffer_underflow = 0;
+  cpi->common.buffer_pool = pool;
+
+  init_config(cpi, oxcf);
+  vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
+
+  cm->current_video_frame = 0;
+  cpi->partition_search_skippable_frame = 0;
+  cpi->tile_data = NULL;
+  cpi->last_show_frame_buf_idx = INVALID_IDX;
+
+  realloc_segmentation_maps(cpi);
+
+#if CONFIG_REF_MV
+  for (i = 0; i < NMV_CONTEXTS; ++i) {
+    CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][0],
+                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
+    CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][1],
+                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
+    CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][0],
+                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
+    CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][1],
+                    vpx_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
+  }
+#endif
+
+  CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
+  CHECK_MEM_ERROR(cm, cpi->nmvcosts[1],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1])));
+  CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0])));
+  CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1])));
+  CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0])));
+  CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1])));
+  CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0])));
+  CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
+                  vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
+
+  for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
+       i++) {
+    CHECK_MEM_ERROR(
+        cm, cpi->mbgraph_stats[i].mb_stats,
+        vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+  }
+
+#if CONFIG_FP_MB_STATS
+  cpi->use_fp_mb_stats = 0;
+  if (cpi->use_fp_mb_stats) {
+    // A placeholder used to store the first-pass mb stats during that pass.
+    CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf,
+                    vpx_calloc(cm->MBs * sizeof(uint8_t), 1));
+  } else {
+    cpi->twopass.frame_mb_stats_buf = NULL;
+  }
+#endif
+
+  cpi->refresh_alt_ref_frame = 0;
+  cpi->multi_arf_last_grp_enabled = 0;
+
+  cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS;
+#if CONFIG_INTERNAL_STATS
+  cpi->b_calculate_blockiness = 1;
+  cpi->b_calculate_consistency = 1;
+  cpi->total_inconsistency = 0;
+  cpi->psnr.worst = 100.0;
+  cpi->worst_ssim = 100.0;
+
+  cpi->count = 0;
+  cpi->bytes = 0;
+
+  if (cpi->b_calculate_psnr) {
+    cpi->total_sq_error = 0;
+    cpi->total_samples = 0;
+    cpi->tot_recode_hits = 0;
+    cpi->summed_quality = 0;
+    cpi->summed_weights = 0;
+  }
+
+  cpi->fastssim.worst = 100.0;
+  cpi->psnrhvs.worst = 100.0;
+
+  if (cpi->b_calculate_blockiness) {
+    cpi->total_blockiness = 0;
+    cpi->worst_blockiness = 0.0;
+  }
+
+  if (cpi->b_calculate_consistency) {
+    CHECK_MEM_ERROR(cm, cpi->ssim_vars,
+                    vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
+                               cpi->common.mi_rows * cpi->common.mi_cols));
+    cpi->worst_consistency = 100.0;
+  }
+#endif
+
+  cpi->first_time_stamp_ever = INT64_MAX;
+
+#if CONFIG_REF_MV
+  for (i = 0; i < NMV_CONTEXTS; ++i) {
+    cpi->td.mb.nmvcost[i][0] = &cpi->nmv_costs[i][0][MV_MAX];
+    cpi->td.mb.nmvcost[i][1] = &cpi->nmv_costs[i][1][MV_MAX];
+    cpi->td.mb.nmvcost_hp[i][0] = &cpi->nmv_costs_hp[i][0][MV_MAX];
+    cpi->td.mb.nmvcost_hp[i][1] = &cpi->nmv_costs_hp[i][1][MV_MAX];
+  }
+#else
+  cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
+  cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
+  cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
+  cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
+  cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
+#endif
+  cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
+  cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
+  cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
+
+  cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
+  cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
+  cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
+
+#ifdef OUTPUT_YUV_SKINMAP
+  yuv_skinmap_file = fopen("skinmap.yuv", "ab");
+#endif
+#ifdef OUTPUT_YUV_REC
+  yuv_rec_file = fopen("rec.yuv", "wb");
+#endif
+
+#if 0
+  framepsnr = fopen("framepsnr.stt", "a");
+  kf_list = fopen("kf_list.stt", "w");
+#endif
+
+  cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
+
+  if (oxcf->pass == 1) {
+    vp10_init_first_pass(cpi);
+  } else if (oxcf->pass == 2) {
+    const size_t packet_sz = sizeof(FIRSTPASS_STATS);
+    const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
+
+#if CONFIG_FP_MB_STATS
+    if (cpi->use_fp_mb_stats) {
+      const size_t psz = cpi->common.MBs * sizeof(uint8_t);
+      const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz);
+
+      cpi->twopass.firstpass_mb_stats.mb_stats_start =
+          oxcf->firstpass_mb_stats_in.buf;
+      cpi->twopass.firstpass_mb_stats.mb_stats_end =
+          cpi->twopass.firstpass_mb_stats.mb_stats_start +
+          (ps - 1) * cpi->common.MBs * sizeof(uint8_t);
+    }
+#endif
+
+    cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf;
+    cpi->twopass.stats_in = cpi->twopass.stats_in_start;
+    cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
+
+    vp10_init_second_pass(cpi);
+  }
+
+  init_upsampled_ref_frame_bufs(cpi);
+
+  vp10_set_speed_features_framesize_independent(cpi);
+  vp10_set_speed_features_framesize_dependent(cpi);
+
+  // Allocate memory to store variances for a frame.
+  CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
+  cpi->source_var_thresh = 0;
+  cpi->frames_till_next_var_check = 0;
+
+#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
+  cpi->fn_ptr[BT].sdf = SDF;                                    \
+  cpi->fn_ptr[BT].sdaf = SDAF;                                  \
+  cpi->fn_ptr[BT].vf = VF;                                      \
+  cpi->fn_ptr[BT].svf = SVF;                                    \
+  cpi->fn_ptr[BT].svaf = SVAF;                                  \
+  cpi->fn_ptr[BT].sdx3f = SDX3F;                                \
+  cpi->fn_ptr[BT].sdx8f = SDX8F;                                \
+  cpi->fn_ptr[BT].sdx4df = SDX4DF;
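+// BFP wires up the per-block-size distortion kernels used by motion search.
+// For instance, after BFP(BLOCK_16X16, ...) below:
+//   cpi->fn_ptr[BLOCK_16X16].sdf == vpx_sad16x16
+//   cpi->fn_ptr[BLOCK_16X16].sdx4df == vpx_sad16x16x4d
+// Block sizes with no specialized kernel for a slot are passed NULL.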
+
+#if CONFIG_EXT_PARTITION
+  BFP(BLOCK_128X128, vpx_sad128x128, vpx_sad128x128_avg, vpx_variance128x128,
+      vpx_sub_pixel_variance128x128, vpx_sub_pixel_avg_variance128x128,
+      vpx_sad128x128x3, vpx_sad128x128x8, vpx_sad128x128x4d)
+
+  BFP(BLOCK_128X64, vpx_sad128x64, vpx_sad128x64_avg, vpx_variance128x64,
+      vpx_sub_pixel_variance128x64, vpx_sub_pixel_avg_variance128x64, NULL,
+      NULL, vpx_sad128x64x4d)
+
+  BFP(BLOCK_64X128, vpx_sad64x128, vpx_sad64x128_avg, vpx_variance64x128,
+      vpx_sub_pixel_variance64x128, vpx_sub_pixel_avg_variance64x128, NULL,
+      NULL, vpx_sad64x128x4d)
+#endif  // CONFIG_EXT_PARTITION
+
+  BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
+      vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
+      vpx_sad32x16x4d)
+
+  BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
+      vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
+      vpx_sad16x32x4d)
+
+  BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
+      vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
+      vpx_sad64x32x4d)
+
+  BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
+      vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
+      vpx_sad32x64x4d)
+
+  BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
+      vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
+      vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
+
+  BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
+      vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
+      vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
+
+  BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
+      vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
+      vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
+
+  BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
+      vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
+      vpx_sad16x8x8, vpx_sad16x8x4d)
+
+  BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
+      vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
+      vpx_sad8x16x8, vpx_sad8x16x4d)
+
+  BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
+      vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
+      vpx_sad8x8x8, vpx_sad8x8x4d)
+
+  BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
+      vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
+      vpx_sad8x4x8, vpx_sad8x4x4d)
+
+  BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
+      vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
+      vpx_sad4x8x8, vpx_sad4x8x4d)
+
+  BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
+      vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
+      vpx_sad4x4x8, vpx_sad4x4x4d)
+
+#if CONFIG_OBMC
+#define OBFP(BT, OSDF, OVF, OSVF) \
+  cpi->fn_ptr[BT].osdf = OSDF;    \
+  cpi->fn_ptr[BT].ovf = OVF;      \
+  cpi->fn_ptr[BT].osvf = OSVF;
+
+#if CONFIG_EXT_PARTITION
+  OBFP(BLOCK_128X128, vpx_obmc_sad128x128, vpx_obmc_variance128x128,
+       vpx_obmc_sub_pixel_variance128x128)
+  OBFP(BLOCK_128X64, vpx_obmc_sad128x64, vpx_obmc_variance128x64,
+       vpx_obmc_sub_pixel_variance128x64)
+  OBFP(BLOCK_64X128, vpx_obmc_sad64x128, vpx_obmc_variance64x128,
+       vpx_obmc_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+  OBFP(BLOCK_64X64, vpx_obmc_sad64x64, vpx_obmc_variance64x64,
+       vpx_obmc_sub_pixel_variance64x64)
+  OBFP(BLOCK_64X32, vpx_obmc_sad64x32, vpx_obmc_variance64x32,
+       vpx_obmc_sub_pixel_variance64x32)
+  OBFP(BLOCK_32X64, vpx_obmc_sad32x64, vpx_obmc_variance32x64,
+       vpx_obmc_sub_pixel_variance32x64)
+  OBFP(BLOCK_32X32, vpx_obmc_sad32x32, vpx_obmc_variance32x32,
+       vpx_obmc_sub_pixel_variance32x32)
+  OBFP(BLOCK_32X16, vpx_obmc_sad32x16, vpx_obmc_variance32x16,
+       vpx_obmc_sub_pixel_variance32x16)
+  OBFP(BLOCK_16X32, vpx_obmc_sad16x32, vpx_obmc_variance16x32,
+       vpx_obmc_sub_pixel_variance16x32)
+  OBFP(BLOCK_16X16, vpx_obmc_sad16x16, vpx_obmc_variance16x16,
+       vpx_obmc_sub_pixel_variance16x16)
+  OBFP(BLOCK_16X8, vpx_obmc_sad16x8, vpx_obmc_variance16x8,
+       vpx_obmc_sub_pixel_variance16x8)
+  OBFP(BLOCK_8X16, vpx_obmc_sad8x16, vpx_obmc_variance8x16,
+       vpx_obmc_sub_pixel_variance8x16)
+  OBFP(BLOCK_8X8, vpx_obmc_sad8x8, vpx_obmc_variance8x8,
+       vpx_obmc_sub_pixel_variance8x8)
+  OBFP(BLOCK_4X8, vpx_obmc_sad4x8, vpx_obmc_variance4x8,
+       vpx_obmc_sub_pixel_variance4x8)
+  OBFP(BLOCK_8X4, vpx_obmc_sad8x4, vpx_obmc_variance8x4,
+       vpx_obmc_sub_pixel_variance8x4)
+  OBFP(BLOCK_4X4, vpx_obmc_sad4x4, vpx_obmc_variance4x4,
+       vpx_obmc_sub_pixel_variance4x4)
+#endif  // CONFIG_OBMC
+
+#if CONFIG_EXT_INTER
+#define MBFP(BT, MSDF, MVF, MSVF) \
+  cpi->fn_ptr[BT].msdf = MSDF;    \
+  cpi->fn_ptr[BT].mvf = MVF;      \
+  cpi->fn_ptr[BT].msvf = MSVF;
+
+#if CONFIG_EXT_PARTITION
+  MBFP(BLOCK_128X128, vpx_masked_sad128x128, vpx_masked_variance128x128,
+       vpx_masked_sub_pixel_variance128x128)
+  MBFP(BLOCK_128X64, vpx_masked_sad128x64, vpx_masked_variance128x64,
+       vpx_masked_sub_pixel_variance128x64)
+  MBFP(BLOCK_64X128, vpx_masked_sad64x128, vpx_masked_variance64x128,
+       vpx_masked_sub_pixel_variance64x128)
+#endif  // CONFIG_EXT_PARTITION
+  MBFP(BLOCK_64X64, vpx_masked_sad64x64, vpx_masked_variance64x64,
+       vpx_masked_sub_pixel_variance64x64)
+  MBFP(BLOCK_64X32, vpx_masked_sad64x32, vpx_masked_variance64x32,
+       vpx_masked_sub_pixel_variance64x32)
+  MBFP(BLOCK_32X64, vpx_masked_sad32x64, vpx_masked_variance32x64,
+       vpx_masked_sub_pixel_variance32x64)
+  MBFP(BLOCK_32X32, vpx_masked_sad32x32, vpx_masked_variance32x32,
+       vpx_masked_sub_pixel_variance32x32)
+  MBFP(BLOCK_32X16, vpx_masked_sad32x16, vpx_masked_variance32x16,
+       vpx_masked_sub_pixel_variance32x16)
+  MBFP(BLOCK_16X32, vpx_masked_sad16x32, vpx_masked_variance16x32,
+       vpx_masked_sub_pixel_variance16x32)
+  MBFP(BLOCK_16X16, vpx_masked_sad16x16, vpx_masked_variance16x16,
+       vpx_masked_sub_pixel_variance16x16)
+  MBFP(BLOCK_16X8, vpx_masked_sad16x8, vpx_masked_variance16x8,
+       vpx_masked_sub_pixel_variance16x8)
+  MBFP(BLOCK_8X16, vpx_masked_sad8x16, vpx_masked_variance8x16,
+       vpx_masked_sub_pixel_variance8x16)
+  MBFP(BLOCK_8X8, vpx_masked_sad8x8, vpx_masked_variance8x8,
+       vpx_masked_sub_pixel_variance8x8)
+  MBFP(BLOCK_4X8, vpx_masked_sad4x8, vpx_masked_variance4x8,
+       vpx_masked_sub_pixel_variance4x8)
+  MBFP(BLOCK_8X4, vpx_masked_sad8x4, vpx_masked_variance8x4,
+       vpx_masked_sub_pixel_variance8x4)
+  MBFP(BLOCK_4X4, vpx_masked_sad4x4, vpx_masked_variance4x4,
+       vpx_masked_sub_pixel_variance4x4)
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  highbd_set_var_fns(cpi);
+#endif
+
+  /* vp10_init_quantizer() is first called here. A check in
+   * vp10_frame_init_quantizer() ensures that vp10_init_quantizer() is only
+   * called again later when needed, avoiding an unnecessary call for every
+   * frame.
+   */
+  vp10_init_quantizer(cpi);
+#if CONFIG_AOM_QM
+  aom_qm_init(cm);
+#endif
+
+  vp10_loop_filter_init(cm);
+#if CONFIG_LOOP_RESTORATION
+  vp10_loop_restoration_precal();
+#endif  // CONFIG_LOOP_RESTORATION
+
+  cm->error.setjmp = 0;
+
+  return cpi;
+}
+
+#define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
+
+#define SNPRINT2(H, T, V) \
+  snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
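+// Both helpers append to a fixed-size char array; H must be an array, not a
+// pointer, so that sizeof(H) yields the buffer size. An illustrative use:
+//   char buf[64] = "PSNR";
+//   SNPRINT2(buf, "\t%7.3f", 42.0);  // buf is now "PSNR\t 42.000"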
+
+void vp10_remove_compressor(VP10_COMP *cpi) {
+  VP10_COMMON *cm;
+  unsigned int i;
+  int t;
+
+  if (!cpi) return;
+
+  cm = &cpi->common;
+  if (cm->current_video_frame > 0) {
+#if CONFIG_INTERNAL_STATS
+    vpx_clear_system_state();
+
+    if (cpi->oxcf.pass != 1) {
+      char headings[512] = { 0 };
+      char results[512] = { 0 };
+      FILE *f = fopen("opsnr.stt", "a");
+      double time_encoded =
+          (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
+          10000000.000;
+      double total_encode_time =
+          (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
+      const double dr =
+          (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
+      const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
+      const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000;
+      const double rate_err = ((100.0 * (dr - target_rate)) / target_rate);
+
+      if (cpi->b_calculate_psnr) {
+        const double total_psnr = vpx_sse_to_psnr(
+            (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
+        const double total_ssim =
+            100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+        snprintf(headings, sizeof(headings),
+                 "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
+                 "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t"
+                 "WstPsnr\tWstSsim\tWstFast\tWstHVS");
+        snprintf(results, sizeof(results),
+                 "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
+                 "%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
+                 "%7.3f\t%7.3f\t%7.3f\t%7.3f",
+                 dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
+                 cpi->psnr.stat[ALL] / cpi->count, total_psnr, total_ssim,
+                 total_ssim, cpi->fastssim.stat[ALL] / cpi->count,
+                 cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
+                 cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst);
+
+        if (cpi->b_calculate_blockiness) {
+          SNPRINT(headings, "\t  Block\tWstBlck");
+          SNPRINT2(results, "\t%7.3f", cpi->total_blockiness / cpi->count);
+          SNPRINT2(results, "\t%7.3f", cpi->worst_blockiness);
+        }
+
+        if (cpi->b_calculate_consistency) {
+          double consistency =
+              vpx_sse_to_psnr((double)cpi->total_samples, peak,
+                              (double)cpi->total_inconsistency);
+
+          SNPRINT(headings, "\tConsist\tWstCons");
+          SNPRINT2(results, "\t%7.3f", consistency);
+          SNPRINT2(results, "\t%7.3f", cpi->worst_consistency);
+        }
+
+        fprintf(f, "%s\t    Time  Rc-Err Abs Err\n", headings);
+        fprintf(f, "%s\t%8.0f %7.2f %7.2f\n", results, total_encode_time,
+                rate_err, fabs(rate_err));
+      }
+
+      fclose(f);
+    }
+
+#endif
+
+#if 0
+    {
+      printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000);
+      printf("\n_frames recive_data encod_mb_row compress_frame  Total\n");
+      printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame,
+             cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000,
+             cpi->time_compress_data / 1000,
+             (cpi->time_receive_data + cpi->time_compress_data) / 1000);
+    }
+#endif
+  }
+
+  for (t = 0; t < cpi->num_workers; ++t) {
+    VPxWorker *const worker = &cpi->workers[t];
+    EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
+
+    // Deallocate allocated threads.
+    vpx_get_worker_interface()->end(worker);
+
+    // Deallocate allocated thread data.
+    if (t < cpi->num_workers - 1) {
+      if (cpi->common.allow_screen_content_tools)
+        vpx_free(thread_data->td->mb.palette_buffer);
+      vpx_free(thread_data->td->counts);
+      vp10_free_pc_tree(thread_data->td);
+      vp10_free_var_tree(thread_data->td);
+      vpx_free(thread_data->td);
+    }
+  }
+  vpx_free(cpi->tile_thr_data);
+  vpx_free(cpi->workers);
+
+  if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+
+  dealloc_compressor_data(cpi);
+
+  for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
+       ++i) {
+    vpx_free(cpi->mbgraph_stats[i].mb_stats);
+  }
+
+#if CONFIG_FP_MB_STATS
+  if (cpi->use_fp_mb_stats) {
+    vpx_free(cpi->twopass.frame_mb_stats_buf);
+    cpi->twopass.frame_mb_stats_buf = NULL;
+  }
+#endif
+
+  vp10_remove_common(cm);
+  vp10_free_ref_frame_buffers(cm->buffer_pool);
+  vpx_free(cpi);
+
+#ifdef OUTPUT_YUV_SKINMAP
+  fclose(yuv_skinmap_file);
+#endif
+#ifdef OUTPUT_YUV_REC
+  fclose(yuv_rec_file);
+#endif
+
+#if 0
+
+  if (keyfile)
+    fclose(keyfile);
+
+  if (framepsnr)
+    fclose(framepsnr);
+
+  if (kf_list)
+    fclose(kf_list);
+
+#endif
+}
+
+static void generate_psnr_packet(VP10_COMP *cpi) {
+  struct vpx_codec_cx_pkt pkt;
+  int i;
+  PSNR_STATS psnr;
+#if CONFIG_VP9_HIGHBITDEPTH
+  vpx_calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
+                       cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
+#else
+  vpx_calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr);
+#endif
+
+  for (i = 0; i < 4; ++i) {
+    pkt.data.psnr.samples[i] = psnr.samples[i];
+    pkt.data.psnr.sse[i] = psnr.sse[i];
+    pkt.data.psnr.psnr[i] = psnr.psnr[i];
+  }
+  pkt.kind = VPX_CODEC_PSNR_PKT;
+  vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
+}
+
+int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
+  if (ref_frame_flags > ((1 << INTER_REFS_PER_FRAME) - 1)) return -1;
+
+  cpi->ref_frame_flags = ref_frame_flags;
+  return 0;
+}
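+// ref_frame_flags is a bitmask with one bit per inter reference (e.g.
+// VPX_LAST_FLAG | VPX_GOLD_FLAG | VPX_ALT_FLAG), so any value with bits set
+// above (1 << INTER_REFS_PER_FRAME) - 1 is rejected as invalid.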
+
+void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
+  cpi->ext_refresh_golden_frame = (ref_frame_flags & VPX_GOLD_FLAG) != 0;
+  cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VPX_ALT_FLAG) != 0;
+  cpi->ext_refresh_last_frame = (ref_frame_flags & VPX_LAST_FLAG) != 0;
+  cpi->ext_refresh_frame_flags_pending = 1;
+}
+
+static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
+    VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
+  MV_REFERENCE_FRAME ref_frame = NONE;
+  if (ref_frame_flag == VPX_LAST_FLAG) ref_frame = LAST_FRAME;
+#if CONFIG_EXT_REFS
+  else if (ref_frame_flag == VPX_LAST2_FLAG)
+    ref_frame = LAST2_FRAME;
+  else if (ref_frame_flag == VPX_LAST3_FLAG)
+    ref_frame = LAST3_FRAME;
+#endif  // CONFIG_EXT_REFS
+  else if (ref_frame_flag == VPX_GOLD_FLAG)
+    ref_frame = GOLDEN_FRAME;
+#if CONFIG_EXT_REFS
+  else if (ref_frame_flag == VPX_BWD_FLAG)
+    ref_frame = BWDREF_FRAME;
+#endif  // CONFIG_EXT_REFS
+  else if (ref_frame_flag == VPX_ALT_FLAG)
+    ref_frame = ALTREF_FRAME;
+
+  return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
+}
+
+int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+                            YV12_BUFFER_CONFIG *sd) {
+  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+  if (cfg) {
+    vpx_yv12_copy_frame(cfg, sd);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+                           YV12_BUFFER_CONFIG *sd) {
+  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+  if (cfg) {
+    vpx_yv12_copy_frame(sd, cfg);
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+int vp10_update_entropy(VP10_COMP *cpi, int update) {
+  cpi->ext_refresh_frame_context = update;
+  cpi->ext_refresh_frame_context_pending = 1;
+  return 0;
+}
+
+#if defined(OUTPUT_YUV_DENOISED) || defined(OUTPUT_YUV_SKINMAP)
+// The denoiser buffer is allocated as a YUV 440 buffer. This function writes it
+// as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
+// not denoise the UV channels at this time. If ever we implement UV channel
+// denoising we will have to modify this.
+void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
+  uint8_t *src = s->y_buffer;
+  int h = s->y_height;
+
+  do {
+    fwrite(src, s->y_width, 1, f);
+    src += s->y_stride;
+  } while (--h);
+
+  src = s->u_buffer;
+  h = s->uv_height;
+
+  do {
+    fwrite(src, s->uv_width, 1, f);
+    src += s->uv_stride;
+  } while (--h);
+
+  src = s->v_buffer;
+  h = s->uv_height;
+
+  do {
+    fwrite(src, s->uv_width, 1, f);
+    src += s->uv_stride;
+  } while (--h);
+}
+#endif
+
+#if CONFIG_EXT_REFS
+static void check_show_existing_frame(VP10_COMP *cpi) {
+  const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+  VP10_COMMON *const cm = &cpi->common;
+  const FRAME_UPDATE_TYPE next_frame_update_type =
+      gf_group->update_type[gf_group->index];
+  const int which_arf = gf_group->arf_update_idx[gf_group->index];
+  if (cpi->rc.is_last_bipred_frame) {
+    // NOTE(zoeliu): If the current frame is a last bi-predictive frame, the
+    //               BWDREF_FRAME needs to be shown next; it is pointed to by
+    //               lst_fb_idxes[0] after the reference frame buffer update.
+    cpi->rc.is_last_bipred_frame = 0;
+    cm->show_existing_frame = 1;
+    cpi->existing_fb_idx_to_show = cpi->lst_fb_idxes[0];
+  } else if (cpi->is_arf_filter_off[which_arf] &&
+             (next_frame_update_type == OVERLAY_UPDATE ||
+              next_frame_update_type == INTNL_OVERLAY_UPDATE)) {
+    // Other parameters related to OVERLAY_UPDATE will be taken care of
+    // in vp10_rc_get_second_pass_params(cpi)
+    cm->show_existing_frame = 1;
+    cpi->rc.is_src_frame_alt_ref = 1;
+    cpi->existing_fb_idx_to_show = cpi->alt_fb_idx;
+    cpi->is_arf_filter_off[which_arf] = 0;
+  } else {
+    cm->show_existing_frame = 0;
+  }
+  cpi->rc.is_src_frame_ext_arf = 0;
+}
+#endif  // CONFIG_EXT_REFS
+
+#ifdef OUTPUT_YUV_REC
+void vp10_write_one_yuv_frame(VP10_COMMON *cm, YV12_BUFFER_CONFIG *s) {
+  uint8_t *src = s->y_buffer;
+  int h = cm->height;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
+    uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
+
+    do {
+      fwrite(src16, s->y_width, 2, yuv_rec_file);
+      src16 += s->y_stride;
+    } while (--h);
+
+    src16 = CONVERT_TO_SHORTPTR(s->u_buffer);
+    h = s->uv_height;
+
+    do {
+      fwrite(src16, s->uv_width, 2, yuv_rec_file);
+      src16 += s->uv_stride;
+    } while (--h);
+
+    src16 = CONVERT_TO_SHORTPTR(s->v_buffer);
+    h = s->uv_height;
+
+    do {
+      fwrite(src16, s->uv_width, 2, yuv_rec_file);
+      src16 += s->uv_stride;
+    } while (--h);
+
+    fflush(yuv_rec_file);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  do {
+    fwrite(src, s->y_width, 1, yuv_rec_file);
+    src += s->y_stride;
+  } while (--h);
+
+  src = s->u_buffer;
+  h = s->uv_height;
+
+  do {
+    fwrite(src, s->uv_width, 1, yuv_rec_file);
+    src += s->uv_stride;
+  } while (--h);
+
+  src = s->v_buffer;
+  h = s->uv_height;
+
+  do {
+    fwrite(src, s->uv_width, 1, yuv_rec_file);
+    src += s->uv_stride;
+  } while (--h);
+
+  fflush(yuv_rec_file);
+}
+#endif  // OUTPUT_YUV_REC
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
+                                                YV12_BUFFER_CONFIG *dst,
+                                                int bd) {
+#else
+static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
+                                                YV12_BUFFER_CONFIG *dst) {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
+  int i;
+  const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+                                   src->v_buffer };
+  const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+  const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
+                              src->uv_crop_width };
+  const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
+                               src->uv_crop_height };
+  uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+  const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
+  const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
+                              dst->uv_crop_width };
+  const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
+                               dst->uv_crop_height };
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
+      vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
+                               src_strides[i], dsts[i], dst_heights[i],
+                               dst_widths[i], dst_strides[i], bd);
+    } else {
+      vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+                        dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+    }
+#else
+    vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+                      dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  }
+  vpx_extend_frame_borders(dst);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                                   YV12_BUFFER_CONFIG *dst, int planes,
+                                   int bd) {
+#else
+static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                                   YV12_BUFFER_CONFIG *dst, int planes) {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  const int src_w = src->y_crop_width;
+  const int src_h = src->y_crop_height;
+  const int dst_w = dst->y_crop_width;
+  const int dst_h = dst->y_crop_height;
+  const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+                                   src->v_buffer };
+  const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+  uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+  const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
+  const InterpFilterParams interp_filter_params =
+      vp10_get_interp_filter_params(EIGHTTAP_REGULAR);
+  const int16_t *kernel = interp_filter_params.filter_ptr;
+  const int taps = interp_filter_params.taps;
+  int x, y, i;
+
+  for (y = 0; y < dst_h; y += 16) {
+    for (x = 0; x < dst_w; x += 16) {
+      for (i = 0; i < planes; ++i) {
+        const int factor = (i == 0 || i == 3 ? 1 : 2);
+        const int x_q4 = x * (16 / factor) * src_w / dst_w;
+        const int y_q4 = y * (16 / factor) * src_h / dst_h;
+        const int src_stride = src_strides[i];
+        const int dst_stride = dst_strides[i];
+        const uint8_t *src_ptr = srcs[i] +
+                                 (y / factor) * src_h / dst_h * src_stride +
+                                 (x / factor) * src_w / dst_w;
+        uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
+          vpx_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
+                               &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
+                               &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
+                               16 / factor, 16 / factor, bd);
+        } else {
+          vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+                        &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
+                        &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
+                        16 / factor, 16 / factor);
+        }
+#else
+        vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride,
+                      &kernel[(x_q4 & 0xf) * taps], 16 * src_w / dst_w,
+                      &kernel[(y_q4 & 0xf) * taps], 16 * src_h / dst_h,
+                      16 / factor, 16 / factor);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      }
+    }
+  }
+
+  if (planes == 1)
+    vpx_extend_frame_borders_y(dst);
+  else
+    vpx_extend_frame_borders(dst);
+}
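+// A note on the fixed-point math above: source positions are tracked in
+// 1/16-pel units, so for each destination tile
+//   x_q4 = x * (16 / factor) * src_w / dst_w;
+// (x_q4 & 0xf) selects one of 16 sub-pel filter phases from the kernel, and
+// 16 * src_w / dst_w is the per-pixel source step handed to the convolver.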
+
+static int scale_down(VP10_COMP *cpi, int q) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+  int scale = 0;
+  assert(frame_is_kf_gf_arf(cpi));
+
+  if (rc->frame_size_selector == UNSCALED &&
+      q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
+    const int max_size_thresh =
+        (int)(rate_thresh_mult[SCALE_STEP1] *
+              VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+    scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
+  }
+  return scale;
+}
+
+// Function to test for conditions that indicate we should loop
+// back and recode a frame.
+static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
+                            int q, int maxq, int minq) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
+  int force_recode = 0;
+
+  if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
+      (cpi->sf.recode_loop == ALLOW_RECODE) ||
+      (frame_is_kfgfarf && (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
+    if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
+        scale_down(cpi, q)) {
+      // Code this group at a lower resolution.
+      cpi->resize_pending = 1;
+      return 1;
+    }
+
+    // TODO(agrange) high_limit could be greater than the scale-down threshold.
+    if ((rc->projected_frame_size > high_limit && q < maxq) ||
+        (rc->projected_frame_size < low_limit && q > minq)) {
+      force_recode = 1;
+    } else if (cpi->oxcf.rc_mode == VPX_CQ) {
+      // Deal with frame undershoot and whether or not we are
+      // below the automatically set cq level.
+      if (q > oxcf->cq_level &&
+          rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) {
+        force_recode = 1;
+      }
+    }
+  }
+  return force_recode;
+}
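+// Note: (rc->this_frame_target * 7) >> 3 above is 7/8 of the target, so in
+// VPX_CQ mode a recode is forced when q sits above the configured cq level
+// yet the frame undershot its target size by more than 12.5%.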
+
+static INLINE int get_free_upsampled_ref_buf(EncRefCntBuffer *ubufs) {
+  int i;
+
+  for (i = 0; i < (REF_FRAMES + 1); i++) {
+    if (!ubufs[i].ref_count) {
+      return i;
+    }
+  }
+  return INVALID_IDX;
+}
+
+// Up-sample 1 reference frame.
+static INLINE int upsample_ref_frame(VP10_COMP *cpi,
+                                     const YV12_BUFFER_CONFIG *const ref) {
+  VP10_COMMON *const cm = &cpi->common;
+  EncRefCntBuffer *ubufs = cpi->upsampled_ref_bufs;
+  int new_uidx = get_free_upsampled_ref_buf(ubufs);
+
+  if (new_uidx == INVALID_IDX) {
+    return INVALID_IDX;
+  } else {
+    YV12_BUFFER_CONFIG *upsampled_ref = &ubufs[new_uidx].buf;
+
+    // Can allocate buffer for Y plane only.
+    if (upsampled_ref->buffer_alloc_sz < (ref->buffer_alloc_sz << 6))
+      if (vpx_realloc_frame_buffer(upsampled_ref, (cm->width << 3),
+                                   (cm->height << 3), cm->subsampling_x,
+                                   cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                   cm->use_highbitdepth,
+#endif
+                                   (VPX_ENC_BORDER_IN_PIXELS << 3),
+                                   cm->byte_alignment, NULL, NULL, NULL))
+        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                           "Failed to allocate up-sampled frame buffer");
+
+// Currently only the Y plane is up-sampled; U and V are not used.
+#if CONFIG_VP9_HIGHBITDEPTH
+    scale_and_extend_frame(ref, upsampled_ref, 1, (int)cm->bit_depth);
+#else
+    scale_and_extend_frame(ref, upsampled_ref, 1);
+#endif
+    return new_uidx;
+  }
+}
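+// The up-sampled buffer is 8x the source in each dimension (width << 3,
+// height << 3, border << 3), i.e. 64x the pixels, which is why the
+// reallocation check above compares against ref->buffer_alloc_sz << 6.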
+
+#define DUMP_REF_FRAME_IMAGES 0
+
+#if DUMP_REF_FRAME_IMAGES == 1
+static int dump_one_image(VP10_COMMON *cm,
+                          const YV12_BUFFER_CONFIG *const ref_buf,
+                          char *file_name) {
+  int h;
+  FILE *f_ref = NULL;
+
+  if (ref_buf == NULL) {
+    printf("Frame data buffer is NULL.\n");
+    return VPX_CODEC_MEM_ERROR;
+  }
+
+  if ((f_ref = fopen(file_name, "wb")) == NULL) {
+    printf("Unable to open file %s to write.\n", file_name);
+    return VPX_CODEC_MEM_ERROR;
+  }
+
+  // --- Y ---
+  for (h = 0; h < cm->height; ++h) {
+    fwrite(&ref_buf->y_buffer[h * ref_buf->y_stride], 1, cm->width, f_ref);
+  }
+  // --- U ---
+  for (h = 0; h < (cm->height >> 1); ++h) {
+    fwrite(&ref_buf->u_buffer[h * ref_buf->uv_stride], 1, (cm->width >> 1),
+           f_ref);
+  }
+  // --- V ---
+  for (h = 0; h < (cm->height >> 1); ++h) {
+    fwrite(&ref_buf->v_buffer[h * ref_buf->uv_stride], 1, (cm->width >> 1),
+           f_ref);
+  }
+
+  fclose(f_ref);
+
+  return VPX_CODEC_OK;
+}
+
+static void dump_ref_frame_images(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  MV_REFERENCE_FRAME ref_frame;
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    char file_name[256] = "";
+    snprintf(file_name, sizeof(file_name), "/tmp/enc_F%d_ref_%d.yuv",
+             cm->current_video_frame, ref_frame);
+    dump_one_image(cm, get_ref_frame_buffer(cpi, ref_frame), file_name);
+  }
+}
+#endif  // DUMP_REF_FRAME_IMAGES == 1
+
+#if CONFIG_EXT_REFS
+// This function is used to shift the virtual indices of last reference frames
+// as follows:
+// LAST_FRAME -> LAST2_FRAME -> LAST3_FRAME
+// when the LAST_FRAME is updated.
+static INLINE void shift_last_ref_frames(VP10_COMP *cpi) {
+  int ref_frame;
+  for (ref_frame = LAST_REF_FRAMES - 1; ref_frame > 0; --ref_frame) {
+    cpi->lst_fb_idxes[ref_frame] = cpi->lst_fb_idxes[ref_frame - 1];
+
+    // [0] is allocated to the current coded frame. The statistics for the
+    // reference frames start at [1].
+    if (!cpi->rc.is_src_frame_alt_ref) {
+      memcpy(cpi->interp_filter_selected[ref_frame + 1],
+             cpi->interp_filter_selected[ref_frame],
+             sizeof(cpi->interp_filter_selected[ref_frame]));
+    }
+  }
+}
+#endif
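+// For example, with LAST_REF_FRAMES == 3 the shift above performs
+//   lst_fb_idxes[2] = lst_fb_idxes[1];
+//   lst_fb_idxes[1] = lst_fb_idxes[0];
+// leaving lst_fb_idxes[0] free for the caller to reassign, e.g. to the
+// retiring bwd_fb_idx or alt_fb_idx as done below.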
+
+void vp10_update_reference_frames(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  BufferPool *const pool = cm->buffer_pool;
+  const int use_upsampled_ref = cpi->sf.use_upsampled_references;
+  int new_uidx = 0;
+
+  // NOTE: Save the new show frame buffer index for --test-code=warn, i.e.,
+  //       to verify there is no mismatch between encoder and decoder.
+  if (cm->show_frame) cpi->last_show_frame_buf_idx = cm->new_fb_idx;
+
+  if (use_upsampled_ref) {
+#if CONFIG_EXT_REFS
+    if (cm->show_existing_frame) {
+      new_uidx = cpi->upsampled_ref_idx[cpi->existing_fb_idx_to_show];
+      // TODO(zoeliu): Once following is confirmed, remove it.
+      assert(cpi->upsampled_ref_bufs[new_uidx].ref_count > 0);
+    } else {
+#endif  // CONFIG_EXT_REFS
+      // Up-sample the current encoded frame.
+      RefCntBuffer *bufs = pool->frame_bufs;
+      const YV12_BUFFER_CONFIG *const ref = &bufs[cm->new_fb_idx].buf;
+
+      new_uidx = upsample_ref_frame(cpi, ref);
+#if CONFIG_EXT_REFS
+      assert(new_uidx != INVALID_IDX);
+    }
+#endif  // CONFIG_EXT_REFS
+  }
+  // At this point the new frame has been encoded.
+  // If any buffer copy / swapping is signaled it should be done here.
+  if (cm->frame_type == KEY_FRAME) {
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+               cm->new_fb_idx);
+#if CONFIG_EXT_REFS
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->bwd_fb_idx],
+               cm->new_fb_idx);
+#endif  // CONFIG_EXT_REFS
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+               cm->new_fb_idx);
+
+    if (use_upsampled_ref) {
+      uref_cnt_fb(cpi->upsampled_ref_bufs,
+                  &cpi->upsampled_ref_idx[cpi->gld_fb_idx], new_uidx);
+#if CONFIG_EXT_REFS
+      uref_cnt_fb(cpi->upsampled_ref_bufs,
+                  &cpi->upsampled_ref_idx[cpi->bwd_fb_idx], new_uidx);
+#endif  // CONFIG_EXT_REFS
+      uref_cnt_fb(cpi->upsampled_ref_bufs,
+                  &cpi->upsampled_ref_idx[cpi->alt_fb_idx], new_uidx);
+    }
+  } else if (vp10_preserve_existing_gf(cpi)) {
+    // We have decided to preserve the previously existing golden frame as our
+    // new ARF frame. However, in the short term, in
+    // vp10_bitstream.c::get_refresh_mask() we leave it in the GF slot and, if
+    // we're updating the GF with the current decoded frame, we save it to the
+    // ARF slot instead.
+    // We now have to update the ARF with the current frame and swap gld_fb_idx
+    // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF
+    // slot and, if we're updating the GF, the current frame becomes the new GF.
+    int tmp;
+
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+               cm->new_fb_idx);
+    if (use_upsampled_ref)
+      uref_cnt_fb(cpi->upsampled_ref_bufs,
+                  &cpi->upsampled_ref_idx[cpi->alt_fb_idx], new_uidx);
+
+    tmp = cpi->alt_fb_idx;
+    cpi->alt_fb_idx = cpi->gld_fb_idx;
+    cpi->gld_fb_idx = tmp;
+
+#if CONFIG_EXT_REFS
+    // We need to modify the mapping accordingly
+    cpi->arf_map[0] = cpi->alt_fb_idx;
+#endif
+// TODO(zoeliu): Do we need to copy cpi->interp_filter_selected[0] over to
+// cpi->interp_filter_selected[GOLDEN_FRAME]?
+#if CONFIG_EXT_REFS
+  } else if (cpi->rc.is_last_bipred_frame) {
+    // Refresh the LAST_FRAME with the BWDREF_FRAME and retire the LAST3_FRAME
+    // by updating the virtual indices. Note that the frame BWDREF_FRAME
+    // currently points to should be retired; it must not be used again until
+    // it has been refreshed.
+    int tmp = cpi->lst_fb_idxes[LAST_REF_FRAMES - 1];
+
+    shift_last_ref_frames(cpi);
+
+    cpi->lst_fb_idxes[0] = cpi->bwd_fb_idx;
+    if (!cpi->rc.is_src_frame_alt_ref) {
+      memcpy(cpi->interp_filter_selected[0],
+             cpi->interp_filter_selected[BWDREF_FRAME],
+             sizeof(cpi->interp_filter_selected[BWDREF_FRAME]));
+    }
+    cpi->bwd_fb_idx = tmp;
+#endif  // CONFIG_EXT_REFS
+#if CONFIG_EXT_REFS
+  } else if (cpi->rc.is_src_frame_ext_arf && cm->show_existing_frame) {
+    // Deal with the special case for showing existing internal ALTREF_FRAME
+    // Refresh the LAST_FRAME with the ALTREF_FRAME and retire the LAST3_FRAME
+    // by updating the virtual indices.
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    int which_arf = gf_group->arf_ref_idx[gf_group->index];
+    int tmp = cpi->lst_fb_idxes[LAST_REF_FRAMES - 1];
+
+    shift_last_ref_frames(cpi);
+
+    cpi->lst_fb_idxes[0] = cpi->alt_fb_idx;
+    memcpy(cpi->interp_filter_selected[LAST_FRAME],
+           cpi->interp_filter_selected[ALTREF_FRAME + which_arf],
+           sizeof(cpi->interp_filter_selected[ALTREF_FRAME + which_arf]));
+
+    cpi->alt_fb_idx = tmp;
+    // We need to modify the mapping accordingly
+    cpi->arf_map[which_arf] = cpi->alt_fb_idx;
+#endif     // CONFIG_EXT_REFS
+  } else { /* For non key/golden frames */
+    if (cpi->refresh_alt_ref_frame) {
+      int arf_idx = cpi->alt_fb_idx;
+      int which_arf = 0;
+#if CONFIG_EXT_REFS
+      if (cpi->oxcf.pass == 2) {
+        const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+        which_arf = gf_group->arf_update_idx[gf_group->index];
+        arf_idx = cpi->arf_map[which_arf];
+      }
+#else
+      if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
+        const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+        arf_idx = gf_group->arf_update_idx[gf_group->index];
+      }
+#endif  // CONFIG_EXT_REFS
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+      if (use_upsampled_ref)
+        uref_cnt_fb(cpi->upsampled_ref_bufs, &cpi->upsampled_ref_idx[arf_idx],
+                    new_uidx);
+
+      memcpy(cpi->interp_filter_selected[ALTREF_FRAME + which_arf],
+             cpi->interp_filter_selected[0],
+             sizeof(cpi->interp_filter_selected[0]));
+    }
+
+    if (cpi->refresh_golden_frame) {
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+                 cm->new_fb_idx);
+      if (use_upsampled_ref)
+        uref_cnt_fb(cpi->upsampled_ref_bufs,
+                    &cpi->upsampled_ref_idx[cpi->gld_fb_idx], new_uidx);
+
+      if (!cpi->rc.is_src_frame_alt_ref) {
+        memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+               cpi->interp_filter_selected[0],
+               sizeof(cpi->interp_filter_selected[0]));
+      } else {
+        int which_arf = 0;
+#if CONFIG_EXT_REFS
+        if (cpi->oxcf.pass == 2) {
+          const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+          which_arf = gf_group->arf_update_idx[gf_group->index];
+        }
+#endif  // CONFIG_EXT_REFS
+        memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+               cpi->interp_filter_selected[ALTREF_FRAME + which_arf],
+               sizeof(cpi->interp_filter_selected[ALTREF_FRAME + which_arf]));
+      }
+    }
+
+#if CONFIG_EXT_REFS
+    if (cpi->refresh_bwd_ref_frame) {
+      if (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs) {
+        // We have swapped the virtual indices to allow bwd_ref_frame to use
+        // ALT0 as reference frame. We need to swap them back.
+        // NOTE: The ALT_REFs are indexed in reverse order, and ALT0 refers to
+        //       the ALT_REF farthest from the first frame in the gf group.
+        int tmp = cpi->arf_map[0];
+        cpi->arf_map[0] = cpi->alt_fb_idx;
+        cpi->alt_fb_idx = cpi->bwd_fb_idx;
+        cpi->bwd_fb_idx = tmp;
+      }
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->bwd_fb_idx],
+                 cm->new_fb_idx);
+      if (use_upsampled_ref)
+        uref_cnt_fb(cpi->upsampled_ref_bufs,
+                    &cpi->upsampled_ref_idx[cpi->bwd_fb_idx], new_uidx);
+
+      memcpy(cpi->interp_filter_selected[BWDREF_FRAME],
+             cpi->interp_filter_selected[0],
+             sizeof(cpi->interp_filter_selected[0]));
+    }
+#endif  // CONFIG_EXT_REFS
+  }
+
+  if (cpi->refresh_last_frame) {
+#if CONFIG_EXT_REFS
+    // NOTE(zoeliu): We have two layers of mapping (1) from the per-frame
+    // reference to the reference frame buffer virtual index; and then (2) from
+    // the virtual index to the reference frame buffer physical index:
+    //
+    // LAST_FRAME,      ..., LAST3_FRAME,     ..., ALTREF_FRAME
+    //      |                     |                     |
+    //      v                     v                     v
+    // lst_fb_idxes[0], ..., lst_fb_idxes[2], ..., alt_fb_idx
+    //      |                     |                     |
+    //      v                     v                     v
+    // ref_frame_map[], ..., ref_frame_map[], ..., ref_frame_map[]
+    //
+    // When refresh_last_frame is set, the intent is to retire LAST3_FRAME,
+    // shift the other 2 LAST reference frames as follows:
+    // LAST_FRAME -> LAST2_FRAME -> LAST3_FRAME
+    // and then have LAST_FRAME refreshed by the newly coded frame.
+    //
+    // To fulfill this, the decoder will be notified to execute the following
+    // 2 steps:
+    //
+    // (a) Change ref_frame_map[] so that the virtual index of LAST3_FRAME
+    //     points to the newly coded frame, i.e.
+    //     ref_frame_map[lst_fb_idxes[2]] => new_fb_idx;
+    //
+    // (b) Change the 1st-layer mapping so that LAST_FRAME maps to the
+    //     original virtual index of LAST3_FRAME, with the other mappings
+    //     shifted as follows:
+    // LAST_FRAME,      LAST2_FRAME,     LAST3_FRAME
+    //      |                |                |
+    //      v                v                v
+    // lst_fb_idxes[2], lst_fb_idxes[0], lst_fb_idxes[1]
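+    //
+    // For example, if lst_fb_idxes == { 0, 1, 2 } before the update, step (a)
+    // writes the new frame into ref_frame_map[2], and step (b) rotates the
+    // mapping to lst_fb_idxes == { 2, 0, 1 }, so LAST_FRAME now points at the
+    // newly coded frame and the old LAST/LAST2 become LAST2/LAST3.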
+    int ref_frame;
+    if (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs) {
+      // We have swapped the virtual indices to use ALT0 as BWD_REF
+      // and we need to swap them back.
+      int tmp = cpi->arf_map[0];
+      cpi->arf_map[0] = cpi->alt_fb_idx;
+      cpi->alt_fb_idx = cpi->bwd_fb_idx;
+      cpi->bwd_fb_idx = tmp;
+    }
+    if (cm->frame_type == KEY_FRAME) {
+      for (ref_frame = 0; ref_frame < LAST_REF_FRAMES; ++ref_frame) {
+        ref_cnt_fb(pool->frame_bufs,
+                   &cm->ref_frame_map[cpi->lst_fb_idxes[ref_frame]],
+                   cm->new_fb_idx);
+
+        if (use_upsampled_ref)
+          uref_cnt_fb(cpi->upsampled_ref_bufs,
+                      &cpi->upsampled_ref_idx[cpi->lst_fb_idxes[ref_frame]],
+                      new_uidx);
+      }
+    } else {
+      int tmp;
+
+      ref_cnt_fb(pool->frame_bufs,
+                 &cm->ref_frame_map[cpi->lst_fb_idxes[LAST_REF_FRAMES - 1]],
+                 cm->new_fb_idx);
+
+      if (use_upsampled_ref)
+        uref_cnt_fb(
+            cpi->upsampled_ref_bufs,
+            &cpi->upsampled_ref_idx[cpi->lst_fb_idxes[LAST_REF_FRAMES - 1]],
+            new_uidx);
+
+      tmp = cpi->lst_fb_idxes[LAST_REF_FRAMES - 1];
+
+      shift_last_ref_frames(cpi);
+
+      cpi->lst_fb_idxes[0] = tmp;
+
+      if (!cpi->rc.is_src_frame_alt_ref) {
+        if (cm->show_existing_frame) {
+          memcpy(cpi->interp_filter_selected[LAST_FRAME],
+                 cpi->interp_filter_selected[BWDREF_FRAME],
+                 sizeof(cpi->interp_filter_selected[BWDREF_FRAME]));
+        } else {
+          memcpy(cpi->interp_filter_selected[LAST_FRAME],
+                 cpi->interp_filter_selected[0],
+                 sizeof(cpi->interp_filter_selected[0]));
+        }
+      }
+    }
+#else
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
+               cm->new_fb_idx);
+    if (use_upsampled_ref)
+      uref_cnt_fb(cpi->upsampled_ref_bufs,
+                  &cpi->upsampled_ref_idx[cpi->lst_fb_idx], new_uidx);
+    if (!cpi->rc.is_src_frame_alt_ref) {
+      memcpy(cpi->interp_filter_selected[LAST_FRAME],
+             cpi->interp_filter_selected[0],
+             sizeof(cpi->interp_filter_selected[0]));
+    }
+#endif  // CONFIG_EXT_REFS
+  }
+
+#if DUMP_REF_FRAME_IMAGES == 1
+  // Dump out all reference frame images.
+  dump_ref_frame_images(cpi);
+#endif  // DUMP_REF_FRAME_IMAGES
+}
+
+static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
+  MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
+  struct loopfilter *lf = &cm->lf;
+  if (is_lossless_requested(&cpi->oxcf)) {
+    lf->filter_level = 0;
+  } else {
+    struct vpx_usec_timer timer;
+
+    vpx_clear_system_state();
+
+    vpx_usec_timer_start(&timer);
+
+#if CONFIG_LOOP_RESTORATION
+    vp10_pick_filter_restoration(cpi->Source, cpi, cpi->sf.lpf_pick);
+#else
+    vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
+#endif  // CONFIG_LOOP_RESTORATION
+
+    vpx_usec_timer_mark(&timer);
+    cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);
+  }
+
+  if (lf->filter_level > 0) {
+#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
+    vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+#else
+    if (cpi->num_workers > 1)
+      vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+                                lf->filter_level, 0, 0, cpi->workers,
+                                cpi->num_workers, &cpi->lf_row_sync);
+    else
+      vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+#endif
+  }
+#if CONFIG_DERING
+  if (is_lossless_requested(&cpi->oxcf)) {
+    cm->dering_level = 0;
+  } else {
+    cm->dering_level =
+        vp10_dering_search(cm->frame_to_show, cpi->Source, cm, xd);
+    vp10_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
+  }
+#endif  // CONFIG_DERING
+
+#if CONFIG_CLPF
+  cm->clpf = 0;
+  if (!is_lossless_requested(&cpi->oxcf)) {
+    // Test CLPF
+    int i, hq = 1;
+    uint64_t before, after;
+    // TODO(yaowu): investigate per-segment CLPF decision and
+    // an optimal threshold, use 80 for now.
+    for (i = 0; i < MAX_SEGMENTS; i++)
+      hq &= vp10_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
+
+    if (!hq) {  // Don't try filter if the entire image is nearly losslessly
+                // encoded
+#if CLPF_FILTER_ALL_PLANES
+      vpx_yv12_copy_frame(cm->frame_to_show, &cpi->last_frame_uf);
+      before = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
+               vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
+               vpx_get_v_sse(cpi->Source, cm->frame_to_show);
+      vp10_clpf_frame(cm->frame_to_show, cm, xd);
+      after = vpx_get_y_sse(cpi->Source, cm->frame_to_show) +
+              vpx_get_u_sse(cpi->Source, cm->frame_to_show) +
+              vpx_get_v_sse(cpi->Source, cm->frame_to_show);
+#else
+      vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+      before = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
+      vp10_clpf_frame(cm->frame_to_show, cm, xd);
+      after = vpx_get_y_sse(cpi->Source, cm->frame_to_show);
+#endif
+      if (before < after) {
+// No improvement, restore original
+#if CLPF_FILTER_ALL_PLANES
+        vpx_yv12_copy_frame(&cpi->last_frame_uf, cm->frame_to_show);
+#else
+        vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+#endif
+      } else {
+        cm->clpf = 1;
+      }
+    }
+  }
+#endif
+#if CONFIG_LOOP_RESTORATION
+  if (cm->rst_info.restoration_type != RESTORE_NONE) {
+    vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+                               cm->frame_type == KEY_FRAME, cm->width,
+                               cm->height);
+    vp10_loop_restoration_rows(cm->frame_to_show, cm, 0, cm->mi_rows, 0);
+  }
+#endif  // CONFIG_LOOP_RESTORATION
+
+  vpx_extend_frame_inner_borders(cm->frame_to_show);
+}
+
+static INLINE void alloc_frame_mvs(VP10_COMMON *const cm, int buffer_idx) {
+  RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
+  if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
+      new_fb_ptr->mi_cols < cm->mi_cols) {
+    vpx_free(new_fb_ptr->mvs);
+    CHECK_MEM_ERROR(cm, new_fb_ptr->mvs,
+                    (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+                                         sizeof(*new_fb_ptr->mvs)));
+    new_fb_ptr->mi_rows = cm->mi_rows;
+    new_fb_ptr->mi_cols = cm->mi_cols;
+  }
+}
+
+void vp10_scale_references(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+  MV_REFERENCE_FRAME ref_frame;
+  const VPX_REFFRAME ref_mask[INTER_REFS_PER_FRAME] = {
+    VPX_LAST_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_LAST2_FLAG,
+    VPX_LAST3_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_GOLD_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_BWD_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_ALT_FLAG
+  };
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    // Need to convert from VPX_REFFRAME to index into ref_mask (subtract 1).
+    if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
+      BufferPool *const pool = cm->buffer_pool;
+      const YV12_BUFFER_CONFIG *const ref =
+          get_ref_frame_buffer(cpi, ref_frame);
+
+      if (ref == NULL) {
+        cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
+        continue;
+      }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
+        RefCntBuffer *new_fb_ptr = NULL;
+        int force_scaling = 0;
+        int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
+        if (new_fb == INVALID_IDX) {
+          new_fb = get_free_fb(cm);
+          force_scaling = 1;
+        }
+        if (new_fb == INVALID_IDX) return;
+        new_fb_ptr = &pool->frame_bufs[new_fb];
+        if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
+            new_fb_ptr->buf.y_crop_height != cm->height) {
+          if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+                                       cm->subsampling_x, cm->subsampling_y,
+                                       cm->use_highbitdepth,
+                                       VPX_ENC_BORDER_IN_PIXELS,
+                                       cm->byte_alignment, NULL, NULL, NULL))
+            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                               "Failed to allocate frame buffer");
+          scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE,
+                                 (int)cm->bit_depth);
+          cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
+          alloc_frame_mvs(cm, new_fb);
+        }
+#else
+      if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
+        RefCntBuffer *new_fb_ptr = NULL;
+        int force_scaling = 0;
+        int new_fb = cpi->scaled_ref_idx[ref_frame - 1];
+        if (new_fb == INVALID_IDX) {
+          new_fb = get_free_fb(cm);
+          force_scaling = 1;
+        }
+        if (new_fb == INVALID_IDX) return;
+        new_fb_ptr = &pool->frame_bufs[new_fb];
+        if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
+            new_fb_ptr->buf.y_crop_height != cm->height) {
+          if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
+                                       cm->subsampling_x, cm->subsampling_y,
+                                       VPX_ENC_BORDER_IN_PIXELS,
+                                       cm->byte_alignment, NULL, NULL, NULL))
+            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                               "Failed to allocate frame buffer");
+          scale_and_extend_frame(ref, &new_fb_ptr->buf, MAX_MB_PLANE);
+          cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
+          alloc_frame_mvs(cm, new_fb);
+        }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+        if (cpi->sf.use_upsampled_references &&
+            (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
+             new_fb_ptr->buf.y_crop_height != cm->height)) {
+          const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
+          EncRefCntBuffer *ubuf =
+              &cpi->upsampled_ref_bufs[cpi->upsampled_ref_idx[map_idx]];
+
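+          // Up-sampled references are stored at 8x the frame dimensions and
+          // border (hence the << 3 shifts below).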
+          if (vpx_realloc_frame_buffer(&ubuf->buf, (cm->width << 3),
+                                       (cm->height << 3), cm->subsampling_x,
+                                       cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                       cm->use_highbitdepth,
+#endif
+                                       (VPX_ENC_BORDER_IN_PIXELS << 3),
+                                       cm->byte_alignment, NULL, NULL, NULL))
+            vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                               "Failed to allocate up-sampled frame buffer");
+#if CONFIG_VP9_HIGHBITDEPTH
+          scale_and_extend_frame(&new_fb_ptr->buf, &ubuf->buf, 1,
+                                 (int)cm->bit_depth);
+#else
+          scale_and_extend_frame(&new_fb_ptr->buf, &ubuf->buf, 1);
+#endif
+        }
+      } else {
+        const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+        RefCntBuffer *const buf = &pool->frame_bufs[buf_idx];
+        buf->buf.y_crop_width = ref->y_crop_width;
+        buf->buf.y_crop_height = ref->y_crop_height;
+        cpi->scaled_ref_idx[ref_frame - 1] = buf_idx;
+        ++buf->ref_count;
+      }
+    } else {
+      if (cpi->oxcf.pass != 0) cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
+    }
+  }
+}
+
+static void release_scaled_references(VP10_COMP *cpi) {
+  VP10_COMMON *cm = &cpi->common;
+  int i;
+  if (cpi->oxcf.pass == 0) {
+    // Only release scaled references under certain conditions: if the
+    // reference will be updated, or if the scaled reference has the same
+    // resolution.
+    int refresh[INTER_REFS_PER_FRAME];
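+    // refresh[] follows the reference order: LAST, (LAST2, LAST3,) GOLDEN,
+    // (BWDREF,) ALTREF; the parenthesized entries exist only with
+    // CONFIG_EXT_REFS.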
+    refresh[0] = (cpi->refresh_last_frame) ? 1 : 0;
+#if CONFIG_EXT_REFS
+    refresh[1] = refresh[2] = 0;
+    refresh[3] = (cpi->refresh_golden_frame) ? 1 : 0;
+    refresh[4] = (cpi->refresh_bwd_ref_frame) ? 1 : 0;
+    refresh[5] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
+#else
+    refresh[1] = (cpi->refresh_golden_frame) ? 1 : 0;
+    refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
+#endif  // CONFIG_EXT_REFS
+    for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
+      const int idx = cpi->scaled_ref_idx[i - 1];
+      RefCntBuffer *const buf =
+          idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
+      const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
+      if (buf != NULL &&
+          (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
+                              buf->buf.y_crop_height == ref->y_crop_height))) {
+        --buf->ref_count;
+        cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
+      }
+    }
+  } else {
+    for (i = 0; i < TOTAL_REFS_PER_FRAME; ++i) {
+      const int idx = cpi->scaled_ref_idx[i];
+      RefCntBuffer *const buf =
+          idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
+      if (buf != NULL) {
+        --buf->ref_count;
+        cpi->scaled_ref_idx[i] = INVALID_IDX;
+      }
+    }
+  }
+}
+
+static void full_to_model_count(unsigned int *model_count,
+                                unsigned int *full_count) {
+  int n;
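+  // All tokens above TWO_TOKEN (except EOB) are folded into the TWO_TOKEN bin
+  // of the model counts.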
+  model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN];
+  model_count[ONE_TOKEN] = full_count[ONE_TOKEN];
+  model_count[TWO_TOKEN] = full_count[TWO_TOKEN];
+  for (n = THREE_TOKEN; n < EOB_TOKEN; ++n)
+    model_count[TWO_TOKEN] += full_count[n];
+  model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
+}
+
+void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
+                               vp10_coeff_count *full_count) {
+  int i, j, k, l;
+
+  for (i = 0; i < PLANE_TYPES; ++i)
+    for (j = 0; j < REF_TYPES; ++j)
+      for (k = 0; k < COEF_BANDS; ++k)
+        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
+          full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]);
+}
+
+#if 0 && CONFIG_INTERNAL_STATS
+static void output_frame_level_debug_stats(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
+  int64_t recon_err;
+
+  vpx_clear_system_state();
+
+  recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+
+  if (cpi->twopass.total_left_stats.coded_error != 0.0)
+    fprintf(f, "%10u %dx%d  %10d %10d %d %d %10d %10d %10d %10d"
+       "%10"PRId64" %10"PRId64" %5d %5d %10"PRId64" "
+       "%10"PRId64" %10"PRId64" %10d "
+       "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf"
+        "%6d %6d %5d %5d %5d "
+        "%10"PRId64" %10.3lf"
+        "%10lf %8u %10"PRId64" %10d %10d %10d\n",
+        cpi->common.current_video_frame,
+        cm->width, cm->height,
+        cpi->td.rd_counts.m_search_count,
+        cpi->td.rd_counts.ex_search_count,
+        cpi->rc.source_alt_ref_pending,
+        cpi->rc.source_alt_ref_active,
+        cpi->rc.this_frame_target,
+        cpi->rc.projected_frame_size,
+        cpi->rc.projected_frame_size / cpi->common.MBs,
+        (cpi->rc.projected_frame_size - cpi->rc.this_frame_target),
+        cpi->rc.vbr_bits_off_target,
+        cpi->rc.vbr_bits_off_target_fast,
+        cpi->twopass.extend_minq,
+        cpi->twopass.extend_minq_fast,
+        cpi->rc.total_target_vs_actual,
+        (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
+        cpi->rc.total_actual_bits, cm->base_qindex,
+        vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
+        (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
+        vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality,
+                                cm->bit_depth),
+        cpi->rc.avg_q,
+        vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
+        cpi->refresh_last_frame, cpi->refresh_golden_frame,
+        cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
+        cpi->twopass.bits_left,
+        cpi->twopass.total_left_stats.coded_error,
+        cpi->twopass.bits_left /
+            (1 + cpi->twopass.total_left_stats.coded_error),
+        cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost,
+        cpi->twopass.kf_zeromotion_pct,
+        cpi->twopass.fr_content_type);
+
+  fclose(f);
+
+  if (0) {
+    FILE *const fmodes = fopen("Modes.stt", "a");
+    int i;
+
+    fprintf(fmodes, "%6d:%1d:%1d:%1d ", cpi->common.current_video_frame,
+            cm->frame_type, cpi->refresh_golden_frame,
+            cpi->refresh_alt_ref_frame);
+
+    for (i = 0; i < MAX_MODES; ++i)
+      fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]);
+
+    fprintf(fmodes, "\n");
+
+    fclose(fmodes);
+  }
+}
+#endif
+
+static void set_mv_search_params(VP10_COMP *cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
+
+  // Default based on max resolution.
+  cpi->mv_step_param = vp10_init_search_range(max_mv_def);
+
+  if (cpi->sf.mv.auto_mv_step_size) {
+    if (frame_is_intra_only(cm)) {
+      // Initialize max_mv_magnitude for use in the first INTER frame
+      // after a key/intra-only frame.
+      cpi->max_mv_magnitude = max_mv_def;
+    } else {
+      if (cm->show_frame) {
+        // Allow mv_steps to correspond to twice the max mv magnitude found
+        // in the previous frame, capped by the default max_mv_magnitude based
+        // on resolution.
+        cpi->mv_step_param = vp10_init_search_range(
+            VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
+      }
+      cpi->max_mv_magnitude = 0;
+    }
+  }
+}
+
+static void set_size_independent_vars(VP10_COMP *cpi) {
+  vp10_set_speed_features_framesize_independent(cpi);
+  vp10_set_rd_speed_thresholds(cpi);
+  vp10_set_rd_speed_thresholds_sub8x8(cpi);
+  cpi->common.interp_filter = cpi->sf.default_interp_filter;
+}
+
+static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+                                    int *top_index) {
+  VP10_COMMON *const cm = &cpi->common;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+
+  // Setup variables that depend on the dimensions of the frame.
+  vp10_set_speed_features_framesize_dependent(cpi);
+
+  // Decide q and q bounds.
+  *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+
+  if (!frame_is_intra_only(cm)) {
+    vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+  }
+
+  // Configure experimental use of segmentation for enhanced coding of
+  // static regions if indicated.
+  // Only allowed in the second pass of a two pass encode, as it requires
+  // lagged coding, and if the relevant speed feature flag is set.
+  if (oxcf->pass == 2 && cpi->sf.static_segmentation)
+    configure_static_seg_features(cpi);
+}
+
+static void init_motion_estimation(VP10_COMP *cpi) {
+  int y_stride = cpi->scaled_source.y_stride;
+
+  if (cpi->sf.mv.search_method == NSTEP) {
+    vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+  } else if (cpi->sf.mv.search_method == DIAMOND) {
+    vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+  }
+}
+
+static void set_frame_size(VP10_COMP *cpi) {
+  int ref_frame;
+  VP10_COMMON *const cm = &cpi->common;
+  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+
+  if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
+      ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
+       (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
+    vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+                              &oxcf->scaled_frame_height);
+
+    // There has been a change in frame size.
+    vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+                          oxcf->scaled_frame_height);
+  }
+
+  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+      oxcf->resize_mode == RESIZE_DYNAMIC) {
+    if (cpi->resize_pending == 1) {
+      oxcf->scaled_frame_width =
+          (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den;
+      oxcf->scaled_frame_height =
+          (cm->height * cpi->resize_scale_num) / cpi->resize_scale_den;
+    } else if (cpi->resize_pending == -1) {
+      // Go back up to original size.
+      oxcf->scaled_frame_width = oxcf->width;
+      oxcf->scaled_frame_height = oxcf->height;
+    }
+    if (cpi->resize_pending != 0) {
+      // There has been a change in frame size.
+      vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+                            oxcf->scaled_frame_height);
+
+      // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
+      set_mv_search_params(cpi);
+    }
+  }
+
+  if (oxcf->pass == 2) {
+    vp10_set_target_rate(cpi);
+  }
+
+  alloc_frame_mvs(cm, cm->new_fb_idx);
+
+  // Reset the frame pointers to the current frame size.
+  if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
+                               cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                               cm->use_highbitdepth,
+#endif
+                               VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+                               NULL, NULL, NULL))
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+                       "Failed to allocate frame buffer");
+
+  alloc_util_frame_buffers(cpi);
+  init_motion_estimation(cpi);
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - LAST_FRAME];
+    const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+
+    ref_buf->idx = buf_idx;
+
+    if (buf_idx != INVALID_IDX) {
+      YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
+      ref_buf->buf = buf;
+#if CONFIG_VP9_HIGHBITDEPTH
+      vp10_setup_scale_factors_for_frame(
+          &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
+          cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
+#else
+      vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+                                         buf->y_crop_height, cm->width,
+                                         cm->height);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      if (vp10_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
+    } else {
+      ref_buf->buf = NULL;
+    }
+  }
+
+  set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
+}
+
+static void reset_use_upsampled_references(VP10_COMP *cpi) {
+  MV_REFERENCE_FRAME ref_frame;
+
+  // Reset the up-sampled reference buffer structure.
+  init_upsampled_ref_frame_bufs(cpi);
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, ref_frame);
+    int new_uidx = upsample_ref_frame(cpi, ref);
+
+    // Update the up-sampled reference index.
+    cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)] = new_uidx;
+    cpi->upsampled_ref_bufs[new_uidx].ref_count++;
+  }
+}
+
+static void encode_without_recode_loop(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int q = 0, bottom_index = 0, top_index = 0;  // Dummy variables.
+  const int use_upsampled_ref = cpi->sf.use_upsampled_references;
+
+  vpx_clear_system_state();
+
+  set_frame_size(cpi);
+
+  // For 1 pass CBR under dynamic resize mode: use faster scaling for source.
+  // Only for 2x2 scaling for now.
+  if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
+      cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
+      cpi->un_scaled_source->y_width == (cm->width << 1) &&
+      cpi->un_scaled_source->y_height == (cm->height << 1)) {
+    cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
+                                              &cpi->scaled_source);
+    if (cpi->unscaled_last_source != NULL)
+      cpi->Last_Source = vp10_scale_if_required_fast(
+          cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
+  } else {
+    cpi->Source =
+        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+    if (cpi->unscaled_last_source != NULL)
+      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+                                                &cpi->scaled_last_source);
+  }
+
+  if (frame_is_intra_only(cm) == 0) {
+    vp10_scale_references(cpi);
+  }
+
+  set_size_independent_vars(cpi);
+  set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
+
+  // cpi->sf.use_upsampled_references can change from frame to frame. Every
+  // time it flips from 0 to 1, the reference frames for this frame have to be
+  // up-sampled before encoding.
+  if (!use_upsampled_ref && cpi->sf.use_upsampled_references)
+    reset_use_upsampled_references(cpi);
+
+  vp10_set_quantizer(cm, q);
+  vp10_set_variance_partition_thresholds(cpi, q);
+
+  setup_frame(cpi);
+
+#if CONFIG_ENTROPY
+  cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
+  vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+  vp10_copy(cpi->subframe_stats.enc_starting_coef_probs, cm->fc->coef_probs);
+  cm->coef_probs_update_idx = 0;
+  vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+#endif  // CONFIG_ENTROPY
+
+  suppress_active_map(cpi);
+  // Variance adaptive and in frame q adjustment experiments are mutually
+  // exclusive.
+  if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
+    vp10_vaq_frame_setup(cpi);
+  } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
+    vp10_setup_in_frame_q_adj(cpi);
+  } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
+    vp10_cyclic_refresh_setup(cpi);
+  }
+  apply_active_map(cpi);
+
+  // Transform / motion compensation: build the reconstruction frame.
+  vp10_encode_frame(cpi);
+
+  // Update some stats from cyclic refresh, and check whether we should skip
+  // updating the golden reference, for 1-pass CBR.
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
+      (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
+    vp10_cyclic_refresh_check_golden_update(cpi);
+
+  // Update the skip mb flag probabilities based on the distribution
+  // seen in the last encoder iteration.
+  // update_base_skip_probs(cpi);
+  vpx_clear_system_state();
+}
+
+static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
+                                    uint8_t *dest) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  int bottom_index, top_index;
+  int loop_count = 0;
+  int loop_at_this_size = 0;
+  int loop = 0;
+  int overshoot_seen = 0;
+  int undershoot_seen = 0;
+  int frame_over_shoot_limit;
+  int frame_under_shoot_limit;
+  int q = 0, q_low = 0, q_high = 0;
+  const int use_upsampled_ref = cpi->sf.use_upsampled_references;
+
+  set_size_independent_vars(cpi);
+
+  // cpi->sf.use_upsampled_references can change from frame to frame. Every
+  // time it flips from 0 to 1, the reference frames for this frame have to be
+  // up-sampled before encoding.
+  if (!use_upsampled_ref && cpi->sf.use_upsampled_references)
+    reset_use_upsampled_references(cpi);
+
+  do {
+    vpx_clear_system_state();
+
+    set_frame_size(cpi);
+
+    if (loop_count == 0 || cpi->resize_pending != 0) {
+      set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
+
+      // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
+      set_mv_search_params(cpi);
+
+      // Reset the loop state for new frame size.
+      overshoot_seen = 0;
+      undershoot_seen = 0;
+
+      // Reconfiguration for change in frame size has concluded.
+      cpi->resize_pending = 0;
+
+      q_low = bottom_index;
+      q_high = top_index;
+
+      loop_at_this_size = 0;
+    }
+
+    // Decide frame size bounds the first time through.
+    if (loop_count == 0) {
+      vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
+                                        &frame_under_shoot_limit,
+                                        &frame_over_shoot_limit);
+    }
+
+    cpi->Source =
+        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+
+    if (cpi->unscaled_last_source != NULL)
+      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+                                                &cpi->scaled_last_source);
+
+    if (frame_is_intra_only(cm) == 0) {
+      if (loop_count > 0) {
+        release_scaled_references(cpi);
+      }
+      vp10_scale_references(cpi);
+    }
+
+    vp10_set_quantizer(cm, q);
+
+    if (loop_count == 0) setup_frame(cpi);
+
+#if CONFIG_ENTROPY
+    // Base q-index may have changed, so we need to assign proper default coef
+    // probs before every iteration.
+    if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
+      int i;
+      vp10_default_coef_probs(cm);
+      if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
+          cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
+        for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
+      } else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) {
+        cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+      }
+    }
+#endif  // CONFIG_ENTROPY
+
+#if CONFIG_ENTROPY
+    cm->do_subframe_update = cm->tile_cols == 1 && cm->tile_rows == 1;
+    if (loop_count == 0 || frame_is_intra_only(cm) ||
+        cm->error_resilient_mode) {
+      vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+      vp10_copy(cpi->subframe_stats.enc_starting_coef_probs,
+                cm->fc->coef_probs);
+    } else {
+      if (cm->do_subframe_update) {
+        vp10_copy(cm->fc->coef_probs,
+                  cpi->subframe_stats.enc_starting_coef_probs);
+        vp10_copy(cm->starting_coef_probs,
+                  cpi->subframe_stats.enc_starting_coef_probs);
+        vp10_zero(cpi->subframe_stats.coef_counts_buf);
+        vp10_zero(cpi->subframe_stats.eob_counts_buf);
+      }
+    }
+    cm->coef_probs_update_idx = 0;
+    vp10_copy(cpi->subframe_stats.coef_probs_buf[0], cm->fc->coef_probs);
+#endif  // CONFIG_ENTROPY
+
+    // Variance adaptive and in frame q adjustment experiments are mutually
+    // exclusive.
+    if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
+      vp10_vaq_frame_setup(cpi);
+    } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
+      vp10_setup_in_frame_q_adj(cpi);
+    }
+
+    // Transform / motion compensation: build the reconstruction frame.
+    vp10_encode_frame(cpi);
+
+    // Update the skip mb flag probabilities based on the distribution
+    // seen in the last encoder iteration.
+    // update_base_skip_probs(cpi);
+
+    vpx_clear_system_state();
+
+    // Dummy pack of the bitstream using up-to-date stats to get an accurate
+    // estimate of the output frame size and determine whether we need to
+    // recode.
+    if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
+      save_coding_context(cpi);
+
+      vp10_pack_bitstream(cpi, dest, size);
+
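+      // The packed size is in bytes; rate control works in bits.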
+      rc->projected_frame_size = (int)(*size) << 3;
+      restore_coding_context(cpi);
+
+      if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
+    }
+
+    if (cpi->oxcf.rc_mode == VPX_Q) {
+      loop = 0;
+    } else {
+      if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
+          (rc->projected_frame_size < rc->max_frame_bandwidth)) {
+        int last_q = q;
+        int64_t kf_err;
+
+        int64_t high_err_target = cpi->ambient_err;
+        int64_t low_err_target = cpi->ambient_err >> 1;
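+        // Aim for a key frame reconstruction error between ambient_err / 2
+        // and ambient_err (the error noted on the frame before the forced
+        // key frame).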
+
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (cm->use_highbitdepth) {
+          kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+        } else {
+          kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+        }
+#else
+        kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+        // Prevent a possible divide-by-zero error below for a perfect KF.
+        kf_err += !kf_err;
+
+        // The key frame is not good enough or we can afford
+        // to make it better without undue risk of popping.
+        if ((kf_err > high_err_target &&
+             rc->projected_frame_size <= frame_over_shoot_limit) ||
+            (kf_err > low_err_target &&
+             rc->projected_frame_size <= frame_under_shoot_limit)) {
+          // Lower q_high
+          q_high = q > q_low ? q - 1 : q_low;
+
+          // Adjust Q
+          q = (int)((q * high_err_target) / kf_err);
+          q = VPXMIN(q, (q_high + q_low) >> 1);
+        } else if (kf_err < low_err_target &&
+                   rc->projected_frame_size >= frame_under_shoot_limit) {
+          // The key frame is much better than the previous frame
+          // Raise q_low
+          q_low = q < q_high ? q + 1 : q_high;
+
+          // Adjust Q
+          q = (int)((q * low_err_target) / kf_err);
+          q = VPXMIN(q, (q_high + q_low + 1) >> 1);
+        }
+
+        // Clamp Q to upper and lower limits:
+        q = clamp(q, q_low, q_high);
+
+        loop = q != last_q;
+      } else if (recode_loop_test(cpi, frame_over_shoot_limit,
+                                  frame_under_shoot_limit, q,
+                                  VPXMAX(q_high, top_index), bottom_index)) {
+        // The projected frame size is out of range and we are allowed to
+        // attempt a recode.
+        int last_q = q;
+        int retries = 0;
+
+        if (cpi->resize_pending == 1) {
+          // Change in frame size so go back around the recode loop.
+          cpi->rc.frame_size_selector =
+              SCALE_STEP1 - cpi->rc.frame_size_selector;
+          cpi->rc.next_frame_size_selector = cpi->rc.frame_size_selector;
+
+#if CONFIG_INTERNAL_STATS
+          ++cpi->tot_recode_hits;
+#endif
+          ++loop_count;
+          loop = 1;
+          continue;
+        }
+
+        // Frame size out of permitted range:
+        // Update correction factor & compute new Q to try...
+
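+        // Once the opposite kind of miss has been seen (or we have looped
+        // more than once at this size), bisect between the current Q bounds;
+        // otherwise let rate control regulate a new Q directly.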
+        // Frame is too large
+        if (rc->projected_frame_size > rc->this_frame_target) {
+          // Special case if the projected size is > the max allowed.
+          if (rc->projected_frame_size >= rc->max_frame_bandwidth)
+            q_high = rc->worst_quality;
+
+          // Raise Qlow to at least the current value.
+          q_low = q < q_high ? q + 1 : q_high;
+
+          if (undershoot_seen || loop_at_this_size > 1) {
+            // Update the rate correction factor.
+            vp10_rc_update_rate_correction_factors(cpi);
+
+            q = (q_high + q_low + 1) / 2;
+          } else {
+            // Update the rate correction factor.
+            vp10_rc_update_rate_correction_factors(cpi);
+
+            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                   VPXMAX(q_high, top_index));
+
+            while (q < q_low && retries < 10) {
+              vp10_rc_update_rate_correction_factors(cpi);
+              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                     VPXMAX(q_high, top_index));
+              retries++;
+            }
+          }
+
+          overshoot_seen = 1;
+        } else {
+          // Frame is too small
+          q_high = q > q_low ? q - 1 : q_low;
+
+          if (overshoot_seen || loop_at_this_size > 1) {
+            vp10_rc_update_rate_correction_factors(cpi);
+            q = (q_high + q_low) / 2;
+          } else {
+            vp10_rc_update_rate_correction_factors(cpi);
+            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                   top_index);
+            // Special-case reset of qlow for constrained quality. This should
+            // only trigger where there is very substantial undershoot on a
+            // frame and the auto cq level is above the user-passed-in value.
+            if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
+              q_low = q;
+            }
+
+            while (q > q_high && retries < 10) {
+              vp10_rc_update_rate_correction_factors(cpi);
+              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+                                     top_index);
+              retries++;
+            }
+          }
+
+          undershoot_seen = 1;
+        }
+
+        // Clamp Q to upper and lower limits:
+        q = clamp(q, q_low, q_high);
+
+        loop = (q != last_q);
+      } else {
+        loop = 0;
+      }
+    }
+
+    // Special case for overlay frame.
+    if (rc->is_src_frame_alt_ref &&
+        rc->projected_frame_size < rc->max_frame_bandwidth)
+      loop = 0;
+
+    if (loop) {
+      ++loop_count;
+      ++loop_at_this_size;
+
+#if CONFIG_INTERNAL_STATS
+      ++cpi->tot_recode_hits;
+#endif
+    }
+  } while (loop);
+}
+
+static int get_ref_frame_flags(const VP10_COMP *cpi) {
+  const int *const map = cpi->common.ref_frame_map;
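+  // When two references resolve to the same physical buffer, the duplicate is
+  // redundant, so its flag is cleared below.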
+
+#if CONFIG_EXT_REFS
+  const int last2_is_last =
+      map[cpi->lst_fb_idxes[1]] == map[cpi->lst_fb_idxes[0]];
+  const int last3_is_last =
+      map[cpi->lst_fb_idxes[2]] == map[cpi->lst_fb_idxes[0]];
+  const int gld_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idxes[0]];
+  const int bwd_is_last = map[cpi->bwd_fb_idx] == map[cpi->lst_fb_idxes[0]];
+  const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idxes[0]];
+
+  const int last3_is_last2 =
+      map[cpi->lst_fb_idxes[2]] == map[cpi->lst_fb_idxes[1]];
+  const int gld_is_last2 = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idxes[1]];
+  const int bwd_is_last2 = map[cpi->bwd_fb_idx] == map[cpi->lst_fb_idxes[1]];
+
+  const int gld_is_last3 = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idxes[2]];
+  const int bwd_is_last3 = map[cpi->bwd_fb_idx] == map[cpi->lst_fb_idxes[2]];
+
+  const int bwd_is_gld = map[cpi->bwd_fb_idx] == map[cpi->gld_fb_idx];
+
+  const int last2_is_alt = map[cpi->lst_fb_idxes[1]] == map[cpi->alt_fb_idx];
+  const int last3_is_alt = map[cpi->lst_fb_idxes[2]] == map[cpi->alt_fb_idx];
+  const int gld_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
+  const int bwd_is_alt = map[cpi->bwd_fb_idx] == map[cpi->alt_fb_idx];
+#else
+  const int gld_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx];
+  const int gld_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
+  const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
+#endif  // CONFIG_EXT_REFS
+
+  int flags = VPX_REFFRAME_ALL;
+
+#if CONFIG_EXT_REFS
+  // Disable the use of BWDREF_FRAME for non-bipredictive frames.
+  if (!(cpi->rc.is_bipred_frame || cpi->rc.is_last_bipred_frame ||
+        (cpi->rc.is_bwd_ref_frame && cpi->num_extra_arfs)))
+    flags &= ~VPX_BWD_FLAG;
+#endif  // CONFIG_EXT_REFS
+
+  if (gld_is_last || gld_is_alt) flags &= ~VPX_GOLD_FLAG;
+
+  if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~VPX_GOLD_FLAG;
+
+  if (alt_is_last) flags &= ~VPX_ALT_FLAG;
+
+#if CONFIG_EXT_REFS
+  if (last2_is_last || last2_is_alt) flags &= ~VPX_LAST2_FLAG;
+
+  if (last3_is_last || last3_is_last2 || last3_is_alt) flags &= ~VPX_LAST3_FLAG;
+
+  if (gld_is_last2 || gld_is_last3) flags &= ~VPX_GOLD_FLAG;
+
+  if ((bwd_is_last || bwd_is_last2 || bwd_is_last3 || bwd_is_gld ||
+       bwd_is_alt) &&
+      (flags & VPX_BWD_FLAG))
+    flags &= ~VPX_BWD_FLAG;
+#endif  // CONFIG_EXT_REFS
+
+  return flags;
+}
+
+static void set_ext_overrides(VP10_COMP *cpi) {
+  // Override the defaults with the externally supplied values from the
+  // vp10_update_reference() and vp10_update_entropy() calls.
+  // Note: The overrides are valid only for the next frame passed to the
+  // encode_frame_to_data_rate() function.
+  if (cpi->ext_refresh_frame_context_pending) {
+    cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context;
+    cpi->ext_refresh_frame_context_pending = 0;
+  }
+  if (cpi->ext_refresh_frame_flags_pending) {
+    cpi->refresh_last_frame = cpi->ext_refresh_last_frame;
+    cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame;
+    cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame;
+    cpi->ext_refresh_frame_flags_pending = 0;
+  }
+}
+
+YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+                                                YV12_BUFFER_CONFIG *unscaled,
+                                                YV12_BUFFER_CONFIG *scaled) {
+  if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
+      cm->mi_rows * MI_SIZE != unscaled->y_height) {
+    // For 2x2 scaling down.
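+    // The ratio arguments passed to vpx_scale_frame() below select the 2:1
+    // downscale in each dimension.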
+    vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
+    vpx_extend_frame_borders(scaled);
+    return scaled;
+  } else {
+    return unscaled;
+  }
+}
+
+YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+                                           YV12_BUFFER_CONFIG *unscaled,
+                                           YV12_BUFFER_CONFIG *scaled) {
+  if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
+      cm->mi_rows * MI_SIZE != unscaled->y_height) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
+#else
+    scale_and_extend_frame_nonnormative(unscaled, scaled);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    return scaled;
+  } else {
+    return unscaled;
+  }
+}
+
+static void set_arf_sign_bias(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int arf_sign_bias;
+#if CONFIG_EXT_REFS
+  const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+  // The arf_sign_bias will be one for internal ARFs.
+  arf_sign_bias = cpi->rc.source_alt_ref_active &&
+                  (!cpi->refresh_alt_ref_frame ||
+                   (gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
+#else
+  if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    arf_sign_bias = cpi->rc.source_alt_ref_active &&
+                    (!cpi->refresh_alt_ref_frame ||
+                     (gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
+  } else {
+    arf_sign_bias =
+        (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
+  }
+#endif
+  cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
+#if CONFIG_EXT_REFS
+  cm->ref_frame_sign_bias[BWDREF_FRAME] = cm->ref_frame_sign_bias[ALTREF_FRAME];
+#endif  // CONFIG_EXT_REFS
+}
+
+static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
+  INTERP_FILTER ifilter;
+  int ref_total[TOTAL_REFS_PER_FRAME] = { 0 };
+  MV_REFERENCE_FRAME ref;
+  int mask = 0;
+  int arf_idx = ALTREF_FRAME;
+
+#if CONFIG_EXT_REFS
+  // Determine which ARF is used as the ALTREF_FRAME.
+  if (cpi->oxcf.pass == 2)
+    arf_idx += cpi->twopass.gf_group.arf_ref_idx[cpi->twopass.gf_group.index];
+#endif
+
+  if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
+    return mask;
+
+#if CONFIG_EXT_REFS
+  for (ref = LAST_FRAME; ref < ALTREF_FRAME; ++ref)
+    for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter)
+      ref_total[ref] += cpi->interp_filter_selected[ref][ifilter];
+
+  for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter)
+    ref_total[ref] += cpi->interp_filter_selected[arf_idx][ifilter];
+#else
+  for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
+    for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter)
+      ref_total[ref] += cpi->interp_filter_selected[ref][ifilter];
+#endif
+
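+  // A filter is masked out only when it was essentially unused: never chosen
+  // for LAST_FRAME, and chosen for each other reference in fewer than 1/50
+  // (2%) of that reference's selections.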
+  for (ifilter = EIGHTTAP_REGULAR; ifilter < SWITCHABLE_FILTERS; ++ifilter) {
+    if ((ref_total[LAST_FRAME] &&
+         cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
+#if CONFIG_EXT_REFS
+        (ref_total[LAST2_FRAME] == 0 ||
+         cpi->interp_filter_selected[LAST2_FRAME][ifilter] * 50 <
+             ref_total[LAST2_FRAME]) &&
+        (ref_total[LAST3_FRAME] == 0 ||
+         cpi->interp_filter_selected[LAST3_FRAME][ifilter] * 50 <
+             ref_total[LAST3_FRAME]) &&
+#endif  // CONFIG_EXT_REFS
+        (ref_total[GOLDEN_FRAME] == 0 ||
+         cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
+             ref_total[GOLDEN_FRAME]) &&
+#if CONFIG_EXT_REFS
+        (ref_total[BWDREF_FRAME] == 0 ||
+         cpi->interp_filter_selected[BWDREF_FRAME][ifilter] * 50 <
+             ref_total[BWDREF_FRAME]) &&
+#endif  // CONFIG_EXT_REFS
+        (ref_total[ALTREF_FRAME] == 0 ||
+         cpi->interp_filter_selected[arf_idx][ifilter] * 50 <
+             ref_total[ALTREF_FRAME]))
+      mask |= 1 << ifilter;
+  }
+  return mask;
+}
+
+#define DUMP_RECON_FRAMES 0
+
+#if DUMP_RECON_FRAMES == 1
+// NOTE(zoeliu): For debug - Output the filtered reconstructed video.
+static void dump_filtered_recon_frames(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  const YV12_BUFFER_CONFIG *recon_buf = cm->frame_to_show;
+  int h;
+  char file_name[256] = "/tmp/enc_filtered_recon.yuv";
+  FILE *f_recon = NULL;
+
+  if (recon_buf == NULL || !cm->show_frame) {
+    printf("Frame %d is not ready or no show to dump.\n",
+           cm->current_video_frame);
+    return;
+  }
+
+  if (cm->current_video_frame == 0) {
+    if ((f_recon = fopen(file_name, "wb")) == NULL) {
+      printf("Unable to open file %s to write.\n", file_name);
+      return;
+    }
+  } else {
+    if ((f_recon = fopen(file_name, "ab")) == NULL) {
+      printf("Unable to open file %s to append.\n", file_name);
+      return;
+    }
+  }
+  printf(
+      "\nFrame=%5d, encode_update_type[%5d]=%1d, show_existing_frame=%d, "
+      "y_stride=%4d, uv_stride=%4d, width=%4d, height=%4d\n",
+      cm->current_video_frame, cpi->twopass.gf_group.index,
+      cpi->twopass.gf_group.update_type[cpi->twopass.gf_group.index],
+      cm->show_existing_frame, recon_buf->y_stride, recon_buf->uv_stride,
+      cm->width, cm->height);
+
+  // --- Y ---
+  for (h = 0; h < cm->height; ++h) {
+    fwrite(&recon_buf->y_buffer[h * recon_buf->y_stride], 1, cm->width,
+           f_recon);
+  }
+  // --- U ---
+  for (h = 0; h < (cm->height >> 1); ++h) {
+    fwrite(&recon_buf->u_buffer[h * recon_buf->uv_stride], 1, (cm->width >> 1),
+           f_recon);
+  }
+  // --- V ---
+  for (h = 0; h < (cm->height >> 1); ++h) {
+    fwrite(&recon_buf->v_buffer[h * recon_buf->uv_stride], 1, (cm->width >> 1),
+           f_recon);
+  }
+
+  fclose(f_recon);
+}
+#endif  // DUMP_RECON_FRAMES
+
+static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
+                                      uint8_t *dest,
+                                      unsigned int *frame_flags) {
+  VP10_COMMON *const cm = &cpi->common;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  struct segmentation *const seg = &cm->seg;
+  TX_SIZE t;
+  set_ext_overrides(cpi);
+  vpx_clear_system_state();
+
+  // Set the arf sign bias for this frame.
+  set_arf_sign_bias(cpi);
+
+#if CONFIG_EXT_REFS
+  // NOTE:
+  // (1) The setup of ref_frame_flags is moved upfront, as it is determined by
+  //     the current frame properties;
+  // (2) The setup of ref_frame_flags applies to both the show_existing_frame
+  //     cases and all other cases.
+  if (cm->current_video_frame > 0)
+    cpi->ref_frame_flags = get_ref_frame_flags(cpi);
+
+  if (cm->show_existing_frame) {
+    // NOTE(zoeliu): In BIDIR_PRED, the existing frame to show is the current
+    //               BWDREF_FRAME in the reference frame buffer.
+    cm->frame_type = INTER_FRAME;
+    cm->show_frame = 1;
+    cpi->frame_flags = *frame_flags;
+
+    // In the case of show_existing_frame, we will not send refresh flags to
+    // the decoder. Any change in the reference frame buffer can be done by
+    // switching the virtual indices.
+
+    cpi->refresh_last_frame = 0;
+    cpi->refresh_golden_frame = 0;
+    cpi->refresh_bwd_ref_frame = 0;
+    cpi->refresh_alt_ref_frame = 0;
+
+    cpi->rc.is_bwd_ref_frame = 0;
+    cpi->rc.is_last_bipred_frame = 0;
+    cpi->rc.is_bipred_frame = 0;
+
+    // Build the bitstream
+    vp10_pack_bitstream(cpi, dest, size);
+
+    // Set up frame to show to get ready for stats collection.
+    cm->frame_to_show = get_frame_new_buffer(cm);
+
+#if DUMP_RECON_FRAMES == 1
+    // NOTE(zoeliu): For debug - Output the filtered reconstructed video.
+    dump_filtered_recon_frames(cpi);
+#endif  // DUMP_RECON_FRAMES
+
+    // Update the LAST_FRAME in the reference frame buffer.
+    vp10_update_reference_frames(cpi);
+
+    // Update frame flags
+    cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN;
+    cpi->frame_flags &= ~FRAMEFLAGS_BWDREF;
+    cpi->frame_flags &= ~FRAMEFLAGS_ALTREF;
+
+    *frame_flags = cpi->frame_flags & ~FRAMEFLAGS_KEY;
+
+    // Update the frame type
+    cm->last_frame_type = cm->frame_type;
+
+#if CONFIG_EXT_REFS
+    // Since we allocate a spot for the OVERLAY frame in the gf group, we need
+    // to do post-encoding update accordingly.
+    if (cpi->rc.is_src_frame_alt_ref) {
+      vp10_set_target_rate(cpi);
+      vp10_rc_postencode_update(cpi, *size);
+    }
+#endif
+
+    cm->last_width = cm->width;
+    cm->last_height = cm->height;
+
+    ++cm->current_video_frame;
+
+    return;
+  }
+#endif  // CONFIG_EXT_REFS
+
+  // Set default state for segment based loop filter update flags.
+  cm->lf.mode_ref_delta_update = 0;
+
+  if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
+    cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
+
+  // Set various flags etc. to special state if it is a key frame.
+  if (frame_is_intra_only(cm)) {
+    // Reset the loop filter deltas and segmentation map.
+    vp10_reset_segment_features(cm);
+
+    // If segmentation is enabled force a map update for key frames.
+    if (seg->enabled) {
+      seg->update_map = 1;
+      seg->update_data = 1;
+    }
+
+    // The alternate reference frame cannot be active for a key frame.
+    cpi->rc.source_alt_ref_active = 0;
+
+    cm->error_resilient_mode = oxcf->error_resilient_mode;
+
+    // By default, the encoder assumes the decoder can use prev_mi.
+    if (cm->error_resilient_mode) {
+      cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
+      cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
+    } else if (cm->intra_only) {
+      // Only reset the current context.
+      cm->reset_frame_context = RESET_FRAME_CONTEXT_CURRENT;
+    }
+  }
+
+  // For 1 pass CBR, check if we are dropping this frame.
+  // Never drop on key frame.
+  if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
+      cm->frame_type != KEY_FRAME) {
+    if (vp10_rc_drop_frame(cpi)) {
+      vp10_rc_postencode_update_drop_frame(cpi);
+      ++cm->current_video_frame;
+      return;
+    }
+  }
+
+  vpx_clear_system_state();
+
+#if CONFIG_INTERNAL_STATS
+  memset(cpi->mode_chosen_counts, 0,
+         MAX_MODES * sizeof(*cpi->mode_chosen_counts));
+#endif
+
+  if (cpi->sf.recode_loop == DISALLOW_RECODE) {
+    encode_without_recode_loop(cpi);
+  } else {
+    encode_with_recode_loop(cpi, size, dest);
+  }
+
+#ifdef OUTPUT_YUV_SKINMAP
+  if (cpi->common.current_video_frame > 1) {
+    vp10_compute_skin_map(cpi, yuv_skinmap_file);
+  }
+#endif  // OUTPUT_YUV_SKINMAP
+
+  // Special case code to reduce pulsing when key frames are forced at a
+  // fixed interval. Note the reconstruction error if it is the frame before
+  // the forced key frame.
+  if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (cm->use_highbitdepth) {
+      cpi->ambient_err =
+          vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+    } else {
+      cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+    }
+#else
+    cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  }
+
+  // If the encoder forced a KEY_FRAME decision
+  if (cm->frame_type == KEY_FRAME) {
+    cpi->refresh_last_frame = 1;
+  }
+
+  cm->frame_to_show = get_frame_new_buffer(cm);
+  cm->frame_to_show->color_space = cm->color_space;
+  cm->frame_to_show->color_range = cm->color_range;
+  cm->frame_to_show->render_width = cm->render_width;
+  cm->frame_to_show->render_height = cm->render_height;
+
+#if CONFIG_EXT_REFS
+// TODO(zoeliu): For non-ref frames, loop filtering may need to be turned
+// off.
+#endif  // CONFIG_EXT_REFS
+
+  // Pick the loop filter level for the frame.
+  loopfilter_frame(cpi, cm);
+
+  // Build the bitstream
+  vp10_pack_bitstream(cpi, dest, size);
+
+#if DUMP_RECON_FRAMES == 1
+  // NOTE(zoeliu): For debug - Output the filtered reconstructed video.
+  if (cm->show_frame) dump_filtered_recon_frames(cpi);
+#endif  // DUMP_RECON_FRAMES
+
+  if (cm->seg.update_map) update_reference_segmentation_map(cpi);
+
+  if (frame_is_intra_only(cm) == 0) {
+    release_scaled_references(cpi);
+  }
+
+  vp10_update_reference_frames(cpi);
+
+  for (t = TX_4X4; t <= TX_32X32; t++)
+    vp10_full_to_model_counts(cpi->td.counts->coef[t],
+                              cpi->td.rd_counts.coef_counts[t]);
+
+  if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+#if CONFIG_ENTROPY
+    cm->partial_prob_update = 0;
+#endif  // CONFIG_ENTROPY
+    vp10_adapt_coef_probs(cm);
+    vp10_adapt_intra_frame_probs(cm);
+  }
+
+  if (!frame_is_intra_only(cm)) {
+    if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
+      vp10_adapt_inter_frame_probs(cm);
+      vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+    }
+  }
+
+  if (cpi->refresh_golden_frame == 1)
+    cpi->frame_flags |= FRAMEFLAGS_GOLDEN;
+  else
+    cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN;
+
+  if (cpi->refresh_alt_ref_frame == 1)
+    cpi->frame_flags |= FRAMEFLAGS_ALTREF;
+  else
+    cpi->frame_flags &= ~FRAMEFLAGS_ALTREF;
+
+#if CONFIG_EXT_REFS
+  if (cpi->refresh_bwd_ref_frame == 1)
+    cpi->frame_flags |= FRAMEFLAGS_BWDREF;
+  else
+    cpi->frame_flags &= ~FRAMEFLAGS_BWDREF;
+#endif  // CONFIG_EXT_REFS
+
+#if !CONFIG_EXT_REFS
+  cpi->ref_frame_flags = get_ref_frame_flags(cpi);
+#endif  // !CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+  cm->last3_frame_type = cm->last2_frame_type;
+  cm->last2_frame_type = cm->last_frame_type;
+#endif  // CONFIG_EXT_REFS
+  cm->last_frame_type = cm->frame_type;
+
+  vp10_rc_postencode_update(cpi, *size);
+
+#if 0
+  output_frame_level_debug_stats(cpi);
+#endif
+
+  if (cm->frame_type == KEY_FRAME) {
+    // Tell the caller that the frame was coded as a key frame
+    *frame_flags = cpi->frame_flags | FRAMEFLAGS_KEY;
+  } else {
+    *frame_flags = cpi->frame_flags & ~FRAMEFLAGS_KEY;
+  }
+
+  // Clear the one shot update flags for segmentation map and mode/ref loop
+  // filter deltas.
+  cm->seg.update_map = 0;
+  cm->seg.update_data = 0;
+  cm->lf.mode_ref_delta_update = 0;
+
+  // keep track of the last coded dimensions
+  cm->last_width = cm->width;
+  cm->last_height = cm->height;
+
+  // reset to normal state now that we are done.
+  if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
+
+  if (cm->show_frame) {
+#if CONFIG_EXT_REFS
+// TODO(zoeliu): We may only swap mi and prev_mi for those frames that are
+// being used as reference.
+#endif  // CONFIG_EXT_REFS
+    vp10_swap_mi_and_prev_mi(cm);
+    // Don't increment frame counters if this was an altref buffer
+    // update not a real frame
+    ++cm->current_video_frame;
+  }
+
+#if CONFIG_EXT_REFS
+  // NOTE: Do not refer to any frame not used as a reference.
+  if (cm->is_reference_frame)
+#endif  // CONFIG_EXT_REFS
+    cm->prev_frame = cm->cur_frame;
+}
+
+static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+                        unsigned int *frame_flags) {
+  if (cpi->oxcf.rc_mode == VPX_CBR) {
+    vp10_rc_get_one_pass_cbr_params(cpi);
+  } else {
+    vp10_rc_get_one_pass_vbr_params(cpi);
+  }
+  encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+}
+
+static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+                        unsigned int *frame_flags) {
+  cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
+
+  encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+
+#if CONFIG_EXT_REFS
+  // Do not do post-encoding update for those frames that do not have a spot in
+  // a gf group, but note that an OVERLAY frame always has a spot in a gf group,
+  // even when show_existing_frame is used.
+  if (!cpi->common.show_existing_frame || cpi->rc.is_src_frame_alt_ref) {
+    vp10_twopass_postencode_update(cpi);
+  }
+  check_show_existing_frame(cpi);
+#else
+  vp10_twopass_postencode_update(cpi);
+#endif  // CONFIG_EXT_REFS
+}
+
+static void init_ref_frame_bufs(VP10_COMMON *cm) {
+  int i;
+  BufferPool *const pool = cm->buffer_pool;
+  cm->new_fb_idx = INVALID_IDX;
+  for (i = 0; i < REF_FRAMES; ++i) {
+    cm->ref_frame_map[i] = INVALID_IDX;
+    pool->frame_bufs[i].ref_count = 0;
+  }
+}
+
+static void check_initial_width(VP10_COMP *cpi,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                int use_highbitdepth,
+#endif
+                                int subsampling_x, int subsampling_y) {
+  VP10_COMMON *const cm = &cpi->common;
+
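+  // (Re)allocate encoder buffers on the first frame, or whenever the input
+  // subsampling or bit depth changes.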
+  if (!cpi->initial_width ||
+#if CONFIG_VP9_HIGHBITDEPTH
+      cm->use_highbitdepth != use_highbitdepth ||
+#endif
+      cm->subsampling_x != subsampling_x ||
+      cm->subsampling_y != subsampling_y) {
+    cm->subsampling_x = subsampling_x;
+    cm->subsampling_y = subsampling_y;
+#if CONFIG_VP9_HIGHBITDEPTH
+    cm->use_highbitdepth = use_highbitdepth;
+#endif
+
+    alloc_raw_frame_buffers(cpi);
+    init_ref_frame_bufs(cm);
+    alloc_util_frame_buffers(cpi);
+
+    init_motion_estimation(cpi);  // TODO(agrange) This can be removed.
+
+    cpi->initial_width = cm->width;
+    cpi->initial_height = cm->height;
+    cpi->initial_mbs = cm->MBs;
+  }
+}
+
+int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                           int64_t end_time) {
+  VP10_COMMON *const cm = &cpi->common;
+  struct vpx_usec_timer timer;
+  int res = 0;
+  const int subsampling_x = sd->subsampling_x;
+  const int subsampling_y = sd->subsampling_y;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0;
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
+#else
+  check_initial_width(cpi, subsampling_x, subsampling_y);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  vpx_usec_timer_start(&timer);
+
+  if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_VP9_HIGHBITDEPTH
+                          use_highbitdepth,
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                          frame_flags))
+    res = -1;
+  vpx_usec_timer_mark(&timer);
+  cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
+
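+  // Profiles 0 and 2 support only 4:2:0; profiles 1 and 3 carry the other
+  // subsampling modes.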
+  if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) &&
+      (subsampling_x != 1 || subsampling_y != 1)) {
+    vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+                       "Non-4:2:0 color format requires profile 1 or 3");
+    res = -1;
+  }
+  if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) &&
+      (subsampling_x == 1 && subsampling_y == 1)) {
+    vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
+                       "4:2:0 color format requires profile 0 or 2");
+    res = -1;
+  }
+
+  return res;
+}
+
+static int frame_is_reference(const VP10_COMP *cpi) {
+  const VP10_COMMON *cm = &cpi->common;
+
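+  // A frame is a reference if any later decode may depend on it: it
+  // refreshes a reference buffer, updates loop-filter/segmentation state,
+  // or (with error resilience off) its adapted frame context is reused.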
+  return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
+         cpi->refresh_golden_frame ||
+#if CONFIG_EXT_REFS
+         cpi->refresh_bwd_ref_frame ||
+#endif  // CONFIG_EXT_REFS
+         cpi->refresh_alt_ref_frame || !cm->error_resilient_mode ||
+         cm->lf.mode_ref_delta_update || cm->seg.update_map ||
+         cm->seg.update_data;
+}
+
+static void adjust_frame_rate(VP10_COMP *cpi,
+                              const struct lookahead_entry *source) {
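+  // Timestamps here are assumed to be in 10,000,000 ticks-per-second units,
+  // hence the 10000000.0 constants used below.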
+  int64_t this_duration;
+  int step = 0;
+
+  if (source->ts_start == cpi->first_time_stamp_ever) {
+    this_duration = source->ts_end - source->ts_start;
+    step = 1;
+  } else {
+    int64_t last_duration =
+        cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
+
+    this_duration = source->ts_end - cpi->last_end_time_stamp_seen;
+
+    // Do a step update if the duration changes by 10% or more.
+    if (last_duration)
+      step = (int)((this_duration - last_duration) * 10 / last_duration);
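+    // For example, last_duration = 100000 and this_duration = 115000 (a 15%
+    // increase) give step = 1, so the frame rate is recomputed directly
+    // below instead of being averaged.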
+  }
+
+  if (this_duration) {
+    if (step) {
+      vp10_new_framerate(cpi, 10000000.0 / this_duration);
+    } else {
+      // Average this frame's rate into the last second's average
+      // frame rate. If we haven't seen 1 second yet, then average
+      // over the whole interval seen.
+      const double interval = VPXMIN(
+          (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0);
+      double avg_duration = 10000000.0 / cpi->framerate;
+      avg_duration *= (interval - avg_duration + this_duration);
+      avg_duration /= interval;
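+      // This is equivalent to avg_duration += avg_duration *
+      // (this_duration - avg_duration) / interval: an exponential moving
+      // average of the frame duration over roughly the last second.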
+
+      vp10_new_framerate(cpi, 10000000.0 / avg_duration);
+    }
+  }
+  cpi->last_time_stamp_seen = source->ts_start;
+  cpi->last_end_time_stamp_seen = source->ts_end;
+}
+
+// Returns 0 if this is not an alt-ref frame; otherwise returns the offset of
+// the source frame used as the ARF midpoint.
+static int get_arf_src_index(VP10_COMP *cpi) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  int arf_src_index = 0;
+  if (is_altref_enabled(cpi)) {
+    if (cpi->oxcf.pass == 2) {
+      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+      if (gf_group->update_type[gf_group->index] == ARF_UPDATE) {
+        arf_src_index = gf_group->arf_src_offset[gf_group->index];
+      }
+    } else if (rc->source_alt_ref_pending) {
+      arf_src_index = rc->frames_till_gf_update_due;
+    }
+  }
+  return arf_src_index;
+}
+
+#if CONFIG_EXT_REFS
+static int get_brf_src_index(VP10_COMP *cpi) {
+  int brf_src_index = 0;
+  const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+
+  // TODO(zoeliu): Add a check on the -bwd_ref command-line setup flag.
+  if (gf_group->bidir_pred_enabled[gf_group->index]) {
+    if (cpi->oxcf.pass == 2) {
+      if (gf_group->update_type[gf_group->index] == BRF_UPDATE)
+        brf_src_index = gf_group->brf_src_offset[gf_group->index];
+    } else {
+      // TODO(zoeliu): Revisit the setup for this scenario.
+      brf_src_index = cpi->rc.bipred_group_interval - 1;
+    }
+  }
+
+  return brf_src_index;
+}
+#endif  // CONFIG_EXT_REFS
+
+static void check_src_altref(VP10_COMP *cpi,
+                             const struct lookahead_entry *source) {
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  // If pass == 2, the parameters set here will be reset in
+  // vp10_rc_get_second_pass_params()
+
+  if (cpi->oxcf.pass == 2) {
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    rc->is_src_frame_alt_ref =
+#if CONFIG_EXT_REFS
+        (gf_group->update_type[gf_group->index] == INTNL_OVERLAY_UPDATE) ||
+#endif
+        (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
+  } else {
+    rc->is_src_frame_alt_ref =
+        cpi->alt_ref_source && (source == cpi->alt_ref_source);
+  }
+
+  if (rc->is_src_frame_alt_ref) {
+    // Current frame is an ARF overlay frame.
+    cpi->alt_ref_source = NULL;
+
+    // Don't refresh the last buffer for an ARF overlay frame. It will
+    // become the GF so preserve last as an alternative prediction option.
+    cpi->refresh_last_frame = 0;
+  }
+}
+
+#if CONFIG_INTERNAL_STATS
+extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+                                  const unsigned char *img2, int img2_pitch,
+                                  int width, int height);
+
+static void adjust_image_stat(double y, double u, double v, double all,
+                              ImageStat *s) {
+  s->stat[Y] += y;
+  s->stat[U] += u;
+  s->stat[V] += v;
+  s->stat[ALL] += all;
+  s->worst = VPXMIN(s->worst, all);
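+  // For these metrics higher is better, so the worst value tracked is the
+  // minimum seen so far.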
+}
+
+static void compute_internal_stats(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  double samples = 0.0;
+  uint32_t in_bit_depth = 8;
+  uint32_t bit_depth = 8;
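+  // in_bit_depth is the bit depth of the source input; bit_depth is the
+  // internal coding bit depth, which may be higher when encoding in high
+  // bit depth.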
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (cm->use_highbitdepth) {
+    in_bit_depth = cpi->oxcf.input_bit_depth;
+    bit_depth = cm->bit_depth;
+  }
+#endif
+  if (cm->show_frame) {
+    const YV12_BUFFER_CONFIG *orig = cpi->Source;
+    const YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
+    double y, u, v, frame_all;
+
+    cpi->count++;
+    if (cpi->b_calculate_psnr) {
+      PSNR_STATS psnr;
+      double frame_ssim2 = 0.0, weight = 0.0;
+      vpx_clear_system_state();
+// TODO(yaowu): unify these two versions into one.
+#if CONFIG_VP9_HIGHBITDEPTH
+      vpx_calc_highbd_psnr(orig, recon, &psnr, bit_depth, in_bit_depth);
+#else
+      vpx_calc_psnr(orig, recon, &psnr);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+      adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3], psnr.psnr[0],
+                        &cpi->psnr);
+      cpi->total_sq_error += psnr.sse[0];
+      cpi->total_samples += psnr.samples[0];
+      samples = psnr.samples[0];
+// TODO(yaowu): unify these two versions into one.
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cm->use_highbitdepth)
+        frame_ssim2 =
+            vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth, in_bit_depth);
+      else
+        frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
+#else
+      frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+      cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
+      cpi->summed_quality += frame_ssim2 * weight;
+      cpi->summed_weights += weight;
+
+#if 0
+      {
+        // Disabled debug output of per-frame PSNR/SSIM values.
+        FILE *f = fopen("q_used.stt", "a");
+        fprintf(f, "%5d : Y%7.3f:U%7.3f:V%7.3f:F%7.3f:S%7.3f\n",
+                cpi->common.current_video_frame, psnr.psnr[1], psnr.psnr[2],
+                psnr.psnr[3], psnr.psnr[0], frame_ssim2);
+        fclose(f);
+      }
+#endif
+    }
+    if (cpi->b_calculate_blockiness) {
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (!cm->use_highbitdepth)
+#endif
+      {
+        const double frame_blockiness =
+            vp10_get_blockiness(orig->y_buffer, orig->y_stride, recon->y_buffer,
+                                recon->y_stride, orig->y_width, orig->y_height);
+        cpi->worst_blockiness = VPXMAX(cpi->worst_blockiness, frame_blockiness);
+        cpi->total_blockiness += frame_blockiness;
+      }
+
+      if (cpi->b_calculate_consistency) {
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (!cm->use_highbitdepth)
+#endif
+        {
+          const double this_inconsistency = vpx_get_ssim_metrics(
+              orig->y_buffer, orig->y_stride, recon->y_buffer, recon->y_stride,
+              orig->y_width, orig->y_height, cpi->ssim_vars, &cpi->metrics, 1);
+
+          const double peak = (double)((1 << in_bit_depth) - 1);
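+          // peak is the largest possible sample value, e.g. 255 for 8-bit
+          // input.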
+          const double consistency =
+              vpx_sse_to_psnr(samples, peak, cpi->total_inconsistency);
+          if (consistency > 0.0)
+            cpi->worst_consistency =
+                VPXMIN(cpi->worst_consistency, consistency);
+          cpi->total_inconsistency += this_inconsistency;
+        }
+      }
+    }
+
+    frame_all =
+        vpx_calc_fastssim(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+    adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
+    frame_all = vpx_psnrhvs(orig, recon, &y, &u, &v, bit_depth, in_bit_depth);
+    adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs);
+  }
+}
+#endif  // CONFIG_INTERNAL_STATS
+
+int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+                             size_t *size, uint8_t *dest, int64_t *time_stamp,
+                             int64_t *time_end, int flush) {
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  VP10_COMMON *const cm = &cpi->common;
+  BufferPool *const pool = cm->buffer_pool;
+  RATE_CONTROL *const rc = &cpi->rc;
+  struct vpx_usec_timer cmptimer;
+  YV12_BUFFER_CONFIG *force_src_buffer = NULL;
+  struct lookahead_entry *last_source = NULL;
+  struct lookahead_entry *source = NULL;
+  int arf_src_index;
+#if CONFIG_EXT_REFS
+  int brf_src_index;
+#endif  // CONFIG_EXT_REFS
+  int i;
+
+#if CONFIG_BITSTREAM_DEBUG
+  assert(cpi->oxcf.max_threads == 0 &&
+         "bitstream debug tool does not support multithreading");
+  bitstream_queue_record_write();
+#endif
+
+  vpx_usec_timer_start(&cmptimer);
+
+  vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+
+  // Is multi-arf enabled?
+  // Note that at the moment multi_arf is only configured for 2-pass VBR.
+  if ((oxcf->pass == 2) && (cpi->oxcf.enable_auto_arf > 1))
+    cpi->multi_arf_allowed = 1;
+  else
+    cpi->multi_arf_allowed = 0;
+
+  // Normal defaults
+  cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
+  cm->refresh_frame_context =
+      (oxcf->error_resilient_mode || oxcf->frame_parallel_decoding_mode)
+          ? REFRESH_FRAME_CONTEXT_FORWARD
+          : REFRESH_FRAME_CONTEXT_BACKWARD;
+
+  cpi->refresh_last_frame = 1;
+  cpi->refresh_golden_frame = 0;
+#if CONFIG_EXT_REFS
+  cpi->refresh_bwd_ref_frame = 0;
+#endif  // CONFIG_EXT_REFS
+  cpi->refresh_alt_ref_frame = 0;
+
+#if CONFIG_EXT_REFS
+  if (oxcf->pass == 2 && cm->show_existing_frame) {
+    // Manage the source buffer and flush out the source frame that has
+    // already been coded; also get prepared for the PSNR calculation if
+    // needed.
+    if ((source = vp10_lookahead_pop(cpi->lookahead, flush)) == NULL) {
+      *size = 0;
+      return -1;
+    }
+    cpi->Source = &source->img;
+    // TODO(zoeliu): Determine whether the frame rate needs to be adjusted
+    // here.
+    *time_stamp = source->ts_start;
+    *time_end = source->ts_end;
+
+    // We need to adjust frame rate for an overlay frame
+    if (cpi->rc.is_src_frame_alt_ref) {
+      adjust_frame_rate(cpi, source);
+    }
+
+    // Find a free buffer for the new frame, releasing the reference previously
+    // held.
+    if (cm->new_fb_idx != INVALID_IDX) {
+      --pool->frame_bufs[cm->new_fb_idx].ref_count;
+    }
+    cm->new_fb_idx = get_free_fb(cm);
+
+    if (cm->new_fb_idx == INVALID_IDX) return -1;
+
+    // Clear down mmx registers
+    vpx_clear_system_state();
+
+    // Start with a 0 size frame.
+    *size = 0;
+
+    // We need to update the gf_group for show_existing overlay frame
+    if (cpi->rc.is_src_frame_alt_ref) {
+      vp10_rc_get_second_pass_params(cpi);
+    }
+
+    Pass2Encode(cpi, size, dest, frame_flags);
+
+    if (cpi->b_calculate_psnr) generate_psnr_packet(cpi);
+
+#if CONFIG_INTERNAL_STATS
+    compute_internal_stats(cpi);
+    cpi->bytes += (int)(*size);
+#endif  // CONFIG_INTERNAL_STATS
+
+    // Clear down mmx registers
+    vpx_clear_system_state();
+
+    cm->show_existing_frame = 0;
+    return 0;
+  }
+#endif  // CONFIG_EXT_REFS
+
+  // Should we encode an arf frame?
+  arf_src_index = get_arf_src_index(cpi);
+  if (arf_src_index) {
+    for (i = 0; i <= arf_src_index; ++i) {
+      struct lookahead_entry *e = vp10_lookahead_peek(cpi->lookahead, i);
+      // Avoid creating an alt-ref if there's a forced keyframe pending.
+      if (e == NULL) {
+        break;
+      } else if (e->flags == VPX_EFLAG_FORCE_KF) {
+        arf_src_index = 0;
+        flush = 1;
+        break;
+      }
+    }
+  }
+
+  if (arf_src_index) {
+    assert(arf_src_index <= rc->frames_to_key);
+
+    if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
+      cpi->alt_ref_source = source;
+
+      if (oxcf->arnr_max_frames > 0) {
+        // Produce the filtered ARF frame.
+        vp10_temporal_filter(cpi, arf_src_index);
+        vpx_extend_frame_borders(&cpi->alt_ref_buffer);
+        force_src_buffer = &cpi->alt_ref_buffer;
+      }
+
+      cm->show_frame = 0;
+      cm->intra_only = 0;
+      cpi->refresh_alt_ref_frame = 1;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_last_frame = 0;
+      rc->is_src_frame_alt_ref = 0;
+    }
+    rc->source_alt_ref_pending = 0;
+  }
+
+#if CONFIG_EXT_REFS
+  rc->is_bwd_ref_frame = 0;
+  brf_src_index = get_brf_src_index(cpi);
+  if (brf_src_index) {
+    assert(brf_src_index <= rc->frames_to_key);
+    if ((source = vp10_lookahead_peek(cpi->lookahead, brf_src_index)) != NULL) {
+      cm->show_frame = 0;
+      cm->intra_only = 0;
+
+      cpi->refresh_bwd_ref_frame = 1;
+      cpi->refresh_last_frame = 0;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_alt_ref_frame = 0;
+
+      rc->is_bwd_ref_frame = 1;
+    }
+  }
+#endif  // CONFIG_EXT_REFS
+
+  if (!source) {
+    // Get last frame source.
+    if (cm->current_video_frame > 0) {
+      if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL)
+        return -1;
+    }
+
+    // Read in the source frame.
+    source = vp10_lookahead_pop(cpi->lookahead, flush);
+
+    if (source != NULL) {
+      cm->show_frame = 1;
+      cm->intra_only = 0;
+
+      // Check to see if the frame should be encoded as an arf overlay.
+      check_src_altref(cpi, source);
+    }
+  }
+
+  if (source) {
+    cpi->un_scaled_source = cpi->Source =
+        force_src_buffer ? force_src_buffer : &source->img;
+
+    cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL;
+
+    *time_stamp = source->ts_start;
+    *time_end = source->ts_end;
+    *frame_flags = (source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
+  } else {
+    *size = 0;
+    if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
+      vp10_end_first_pass(cpi); /* get last stats packet */
+      cpi->twopass.first_pass_done = 1;
+    }
+    return -1;
+  }
+
+  if (source->ts_start < cpi->first_time_stamp_ever) {
+    cpi->first_time_stamp_ever = source->ts_start;
+    cpi->last_end_time_stamp_seen = source->ts_start;
+  }
+
+  // Clear down mmx registers
+  vpx_clear_system_state();
+
+  // adjust frame rates based on timestamps given
+  if (cm->show_frame) adjust_frame_rate(cpi, source);
+
+  // Find a free buffer for the new frame, releasing the reference previously
+  // held.
+  if (cm->new_fb_idx != INVALID_IDX) {
+    --pool->frame_bufs[cm->new_fb_idx].ref_count;
+  }
+  cm->new_fb_idx = get_free_fb(cm);
+
+  if (cm->new_fb_idx == INVALID_IDX) return -1;
+
+  cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
+
+#if CONFIG_EXT_REFS
+  if (oxcf->pass == 2) {
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    cpi->alt_fb_idx = cpi->arf_map[gf_group->arf_ref_idx[gf_group->index]];
+  }
+#else
+  if (cpi->multi_arf_allowed) {
+    if (cm->frame_type == KEY_FRAME) {
+      init_buffer_indices(cpi);
+    } else if (oxcf->pass == 2) {
+      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+      cpi->alt_fb_idx = gf_group->arf_ref_idx[gf_group->index];
+    }
+  }
+#endif
+  // Start with a 0 size frame.
+  *size = 0;
+
+  cpi->frame_flags = *frame_flags;
+
+  if (oxcf->pass == 2) {
+    vp10_rc_get_second_pass_params(cpi);
+  } else if (oxcf->pass == 1) {
+    set_frame_size(cpi);
+  }
+
+  if (cpi->oxcf.pass != 0 || frame_is_intra_only(cm) == 1) {
+    for (i = 0; i < TOTAL_REFS_PER_FRAME; ++i)
+      cpi->scaled_ref_idx[i] = INVALID_IDX;
+  }
+
+#if CONFIG_AOM_QM
+  cm->using_qmatrix = cpi->oxcf.using_qm;
+  cm->min_qmlevel = cpi->oxcf.qm_minlevel;
+  cm->max_qmlevel = cpi->oxcf.qm_maxlevel;
+#endif
+
+  if (oxcf->pass == 1) {
+    cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf);
+    vp10_first_pass(cpi, source);
+  } else if (oxcf->pass == 2) {
+    Pass2Encode(cpi, size, dest, frame_flags);
+  } else {
+    // One pass encode
+    Pass0Encode(cpi, size, dest, frame_flags);
+  }
+
+  if (!cm->error_resilient_mode)
+    cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+
+  // No frame encoded, or frame was dropped, release scaled references.
+  if ((*size == 0) && (frame_is_intra_only(cm) == 0)) {
+    release_scaled_references(cpi);
+  }
+
+  if (*size > 0) {
+    cpi->droppable = !frame_is_reference(cpi);
+  }
+
+  vpx_usec_timer_mark(&cmptimer);
+  cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer);
+
+  if (cpi->b_calculate_psnr && oxcf->pass != 1 && cm->show_frame)
+    generate_psnr_packet(cpi);
+
+#if CONFIG_INTERNAL_STATS
+  if (oxcf->pass != 1) {
+    compute_internal_stats(cpi);
+    cpi->bytes += (int)(*size);
+  }
+#endif  // CONFIG_INTERNAL_STATS
+
+  vpx_clear_system_state();
+
+  return 0;
+}
+
+int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
+  VP10_COMMON *cm = &cpi->common;
+  if (!cm->show_frame) {
+    return -1;
+  } else {
+    int ret;
+    if (cm->frame_to_show) {
+      *dest = *cm->frame_to_show;
+      dest->y_width = cm->width;
+      dest->y_height = cm->height;
+      dest->uv_width = cm->width >> cm->subsampling_x;
+      dest->uv_height = cm->height >> cm->subsampling_y;
+      ret = 0;
+    } else {
+      ret = -1;
+    }
+    vpx_clear_system_state();
+    return ret;
+  }
+}
+
+int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame) {
+  if (cpi->last_show_frame_buf_idx == INVALID_IDX) return -1;
+
+  *frame =
+      cpi->common.buffer_pool->frame_bufs[cpi->last_show_frame_buf_idx].buf;
+  return 0;
+}
+
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+                           VPX_SCALING vert_mode) {
+  VP10_COMMON *cm = &cpi->common;
+  int hr = 0, hs = 0, vr = 0, vs = 0;
+
+  if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
+
+  Scale2Ratio(horiz_mode, &hr, &hs);
+  Scale2Ratio(vert_mode, &vr, &vs);
+
+  // always go to the next whole number
+  cm->width = (hs - 1 + cpi->oxcf.width * hr) / hs;
+  cm->height = (vs - 1 + cpi->oxcf.height * vr) / vs;
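+  // This rounds up: cm->width = ceil(width * hr / hs), e.g. a 1920-wide
+  // source at ONETWO (1/2) scaling becomes 960.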
+  assert(cm->width <= cpi->initial_width);
+  assert(cm->height <= cpi->initial_height);
+
+  update_frame_size(cpi);
+
+  return 0;
+}
+
+int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+                          unsigned int height) {
+  VP10_COMMON *cm = &cpi->common;
+#if CONFIG_VP9_HIGHBITDEPTH
+  check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
+#else
+  check_initial_width(cpi, 1, 1);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  if (width) {
+    cm->width = width;
+    if (cm->width > cpi->initial_width) {
+      cm->width = cpi->initial_width;
+      printf("Warning: Desired width too large, changed to %d\n", cm->width);
+    }
+  }
+
+  if (height) {
+    cm->height = height;
+    if (cm->height > cpi->initial_height) {
+      cm->height = cpi->initial_height;
+      printf("Warning: Desired height too large, changed to %d\n", cm->height);
+    }
+  }
+  assert(cm->width <= cpi->initial_width);
+  assert(cm->height <= cpi->initial_height);
+
+  update_frame_size(cpi);
+
+  return 0;
+}
+
+int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
+
+void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) {
+  if (flags &
+      (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
+    int ref = VPX_REFFRAME_ALL;
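+    // ref starts with every reference flag set; each XOR below clears the
+    // corresponding flag, leaving only the references still allowed.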
+
+    if (flags & VP8_EFLAG_NO_REF_LAST) {
+      ref ^= VPX_LAST_FLAG;
+#if CONFIG_EXT_REFS
+      ref ^= VPX_LAST2_FLAG;
+      ref ^= VPX_LAST3_FLAG;
+#endif  // CONFIG_EXT_REFS
+    }
+
+    if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VPX_GOLD_FLAG;
+
+    if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
+
+    vp10_use_as_reference(cpi, ref);
+  }
+
+  if (flags &
+      (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+       VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
+    int upd = VPX_REFFRAME_ALL;
+
+    if (flags & VP8_EFLAG_NO_UPD_LAST) {
+      upd ^= VPX_LAST_FLAG;
+#if CONFIG_EXT_REFS
+      upd ^= VPX_LAST2_FLAG;
+      upd ^= VPX_LAST3_FLAG;
+#endif  // CONFIG_EXT_REFS
+    }
+
+    if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VPX_GOLD_FLAG;
+
+    if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
+
+    vp10_update_reference(cpi, upd);
+  }
+
+  if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+    vp10_update_entropy(cpi, 0);
+  }
+}
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
new file mode 100644
index 0000000..719615b
--- /dev/null
+++ b/av1/encoder/encoder.h
@@ -0,0 +1,833 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_ENCODER_H_
+#define VP10_ENCODER_ENCODER_H_
+
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "aom/vp8cx.h"
+
+#include "av1/common/alloccommon.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/thread_common.h"
+#include "av1/common/onyxc_int.h"
+#include "av1/encoder/aq_cyclicrefresh.h"
+#if CONFIG_ANS
+#include "av1/encoder/buf_ans.h"
+#endif
+#include "av1/encoder/context_tree.h"
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/firstpass.h"
+#include "av1/encoder/lookahead.h"
+#include "av1/encoder/mbgraph.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/speed_features.h"
+#include "av1/encoder/tokenize.h"
+#include "av1/encoder/variance_tree.h"
+
+#if CONFIG_INTERNAL_STATS
+#include "aom_dsp/ssim.h"
+#endif
+#include "aom_dsp/variance.h"
+#include "aom/internal/vpx_codec_internal.h"
+#include "aom_util/vpx_thread.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+  int nmvjointcost[MV_JOINTS];
+  int nmvcosts[2][MV_VALS];
+  int nmvcosts_hp[2][MV_VALS];
+
+#if CONFIG_REF_MV
+  int nmv_vec_cost[NMV_CONTEXTS][MV_JOINTS];
+  int nmv_costs[NMV_CONTEXTS][2][MV_VALS];
+  int nmv_costs_hp[NMV_CONTEXTS][2][MV_VALS];
+#endif
+
+  unsigned char *last_frame_seg_map_copy;
+
+  // 0 = Intra, Last, GF, ARF
+  signed char last_ref_lf_deltas[TOTAL_REFS_PER_FRAME];
+  // 0 = ZERO_MV, MV
+  signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS];
+
+  FRAME_CONTEXT fc;
+} CODING_CONTEXT;
+
+typedef enum {
+  // regular inter frame
+  REGULAR_FRAME = 0,
+  // alternate reference frame
+  ARF_FRAME = 1,
+  // overlay frame
+  OVERLAY_FRAME = 2,
+  // golden frame
+  GLD_FRAME = 3,
+#if CONFIG_EXT_REFS
+  // backward reference frame
+  BRF_FRAME = 4,
+  // extra alternate reference frame
+  EXT_ARF_FRAME = 5
+#endif
+} FRAME_CONTEXT_INDEX;
+
+typedef enum {
+  // encode_breakout is disabled.
+  ENCODE_BREAKOUT_DISABLED = 0,
+  // encode_breakout is enabled.
+  ENCODE_BREAKOUT_ENABLED = 1,
+  // encode_breakout is enabled with small max_thresh limit.
+  ENCODE_BREAKOUT_LIMITED = 2
+} ENCODE_BREAKOUT_TYPE;
+
+typedef enum {
+  NORMAL = 0,
+  FOURFIVE = 1,
+  THREEFIVE = 2,
+  ONETWO = 3
+} VPX_SCALING;
+
+typedef enum {
+  // Good Quality Fast Encoding. The encoder balances quality with the amount of
+  // time it takes to encode the output. Speed setting controls how fast.
+  GOOD,
+
+  // The encoder places priority on the quality of the output over encoding
+  // speed. The output is compressed at the highest possible quality. This
+  // option takes the longest amount of time to encode. Speed setting ignored.
+  BEST,
+
+  // Realtime/Live Encoding. This mode is optimized for realtime encoding (for
+  // example, capturing a television signal or feed from a live camera). Speed
+  // setting controls how fast.
+  REALTIME
+} MODE;
+
+typedef enum {
+  FRAMEFLAGS_KEY = 1 << 0,
+  FRAMEFLAGS_GOLDEN = 1 << 1,
+#if CONFIG_EXT_REFS
+  FRAMEFLAGS_BWDREF = 1 << 2,
+  FRAMEFLAGS_ALTREF = 1 << 3,
+#else
+  FRAMEFLAGS_ALTREF = 1 << 2,
+#endif  // CONFIG_EXT_REFS
+} FRAMETYPE_FLAGS;
+
+typedef enum {
+  NO_AQ = 0,
+  VARIANCE_AQ = 1,
+  COMPLEXITY_AQ = 2,
+  CYCLIC_REFRESH_AQ = 3,
+  AQ_MODE_COUNT  // This should always be the last member of the enum
+} AQ_MODE;
+
+typedef enum {
+  RESIZE_NONE = 0,    // No frame resizing allowed.
+  RESIZE_FIXED = 1,   // All frames are coded at the specified dimension.
+  RESIZE_DYNAMIC = 2  // Coded size of each frame is determined by the codec.
+} RESIZE_TYPE;
+
+typedef struct VP10EncoderConfig {
+  BITSTREAM_PROFILE profile;
+  vpx_bit_depth_t bit_depth;     // Codec bit-depth.
+  int width;                     // width of data passed to the compressor
+  int height;                    // height of data passed to the compressor
+  unsigned int input_bit_depth;  // Input bit depth.
+  double init_framerate;         // set to passed in framerate
+  int64_t target_bandwidth;      // bandwidth to be used in bits per second
+
+  int noise_sensitivity;  // pre processing blur: recommendation 0
+  int sharpness;          // sharpening output: recommendation 0
+  int speed;
+  // maximum allowed bitrate for any intra frame in % of bitrate target.
+  unsigned int rc_max_intra_bitrate_pct;
+  // maximum allowed bitrate for any inter frame in % of bitrate target.
+  unsigned int rc_max_inter_bitrate_pct;
+  // percent of rate boost for golden frame in CBR mode.
+  unsigned int gf_cbr_boost_pct;
+
+  MODE mode;
+  int pass;
+
+  // Key Framing Operations
+  int auto_key;  // autodetect cut scenes and set the keyframes
+  int key_freq;  // maximum distance to key frame.
+
+  int lag_in_frames;  // how many frames lag before we start encoding
+
+  // ----------------------------------------------------------------
+  // DATARATE CONTROL OPTIONS
+
+  // vbr, cbr, constrained quality or constant quality
+  enum vpx_rc_mode rc_mode;
+
+  // buffer targeting aggressiveness
+  int under_shoot_pct;
+  int over_shoot_pct;
+
+  // buffering parameters
+  int64_t starting_buffer_level_ms;
+  int64_t optimal_buffer_level_ms;
+  int64_t maximum_buffer_size_ms;
+
+  // Frame drop threshold.
+  int drop_frames_water_mark;
+
+  // controlling quality
+  int fixed_q;
+  int worst_allowed_q;
+  int best_allowed_q;
+  int cq_level;
+  AQ_MODE aq_mode;  // Adaptive Quantization mode
+#if CONFIG_AOM_QM
+  int using_qm;
+  int qm_minlevel;
+  int qm_maxlevel;
+#endif
+
+  // Internal frame size scaling.
+  RESIZE_TYPE resize_mode;
+  int scaled_frame_width;
+  int scaled_frame_height;
+
+  // Enable feature to reduce the frame quantization every x frames.
+  int frame_periodic_boost;
+
+  // two pass datarate control
+  int two_pass_vbrbias;  // two pass datarate control tweaks
+  int two_pass_vbrmin_section;
+  int two_pass_vbrmax_section;
+  // END DATARATE CONTROL OPTIONS
+  // ----------------------------------------------------------------
+
+  int enable_auto_arf;
+#if CONFIG_EXT_REFS
+  int enable_auto_brf;  // (b)ackward (r)ef (f)rame
+#endif                  // CONFIG_EXT_REFS
+
+  int encode_breakout;  // early breakout: for video conferencing recommend 800
+
+  /* Bitfield defining the error resiliency features to enable.
+   * Can provide decodable frames after losses in previous
+   * frames and decodable partitions after losses in the same frame.
+   */
+  unsigned int error_resilient_mode;
+
+  /* Bitfield defining the parallel decoding mode where the
+   * decoding in successive frames may be conducted in parallel
+   * just by decoding the frame headers.
+   */
+  unsigned int frame_parallel_decoding_mode;
+
+  int arnr_max_frames;
+  int arnr_strength;
+
+  int min_gf_interval;
+  int max_gf_interval;
+
+  int tile_columns;
+  int tile_rows;
+
+  int max_threads;
+
+  vpx_fixed_buf_t two_pass_stats_in;
+  struct vpx_codec_pkt_list *output_pkt_list;
+
+#if CONFIG_FP_MB_STATS
+  vpx_fixed_buf_t firstpass_mb_stats_in;
+#endif
+
+  vpx_tune_metric tuning;
+  vpx_tune_content content;
+#if CONFIG_VP9_HIGHBITDEPTH
+  int use_highbitdepth;
+#endif
+  vpx_color_space_t color_space;
+  int color_range;
+  int render_width;
+  int render_height;
+
+#if CONFIG_EXT_PARTITION
+  vpx_superblock_size_t superblock_size;
+#endif  // CONFIG_EXT_PARTITION
+} VP10EncoderConfig;
+
+static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) {
+  return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0;
+}
+
+// TODO(jingning) All spatially adaptive variables should go to TileDataEnc.
+typedef struct TileDataEnc {
+  TileInfo tile_info;
+  int thresh_freq_fact[BLOCK_SIZES][MAX_MODES];
+  int mode_map[BLOCK_SIZES][MAX_MODES];
+} TileDataEnc;
+
+typedef struct RD_COUNTS {
+  vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
+  int64_t comp_pred_diff[REFERENCE_MODES];
+  int m_search_count;
+  int ex_search_count;
+} RD_COUNTS;
+
+typedef struct ThreadData {
+  MACROBLOCK mb;
+  RD_COUNTS rd_counts;
+  FRAME_COUNTS *counts;
+
+  PICK_MODE_CONTEXT *leaf_tree;
+  PC_TREE *pc_tree;
+  PC_TREE *pc_root[MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2 + 1];
+
+  VAR_TREE *var_tree;
+  VAR_TREE *var_root[MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2 + 1];
+} ThreadData;
+
+struct EncWorkerData;
+
+typedef struct ActiveMap {
+  int enabled;
+  int update;
+  unsigned char *map;
+} ActiveMap;
+
+typedef enum { Y, U, V, ALL } STAT_TYPE;
+
+typedef struct IMAGE_STAT {
+  double stat[ALL + 1];
+  double worst;
+} ImageStat;
+
+typedef struct {
+  int ref_count;
+  YV12_BUFFER_CONFIG buf;
+} EncRefCntBuffer;
+
+#if CONFIG_ENTROPY
+typedef struct SUBFRAME_STATS {
+  vp10_coeff_probs_model coef_probs_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+  vp10_coeff_count coef_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+  unsigned int eob_counts_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES][REF_TYPES]
+                             [COEF_BANDS][COEFF_CONTEXTS];
+  vp10_coeff_probs_model enc_starting_coef_probs[TX_SIZES][PLANE_TYPES];
+} SUBFRAME_STATS;
+#endif  // CONFIG_ENTROPY
+
+typedef struct TileBufferEnc {
+  uint8_t *data;
+  size_t size;
+} TileBufferEnc;
+
+typedef struct VP10_COMP {
+  QUANTS quants;
+  ThreadData td;
+  MB_MODE_INFO_EXT *mbmi_ext_base;
+  DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);   // 8: SIMD width
+  DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);  // 8: SIMD width
+#if CONFIG_NEW_QUANT
+  DECLARE_ALIGNED(16, dequant_val_type_nuq,
+                  y_dequant_val_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS]);
+  DECLARE_ALIGNED(16, dequant_val_type_nuq,
+                  uv_dequant_val_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS]);
+#endif  // CONFIG_NEW_QUANT
+  VP10_COMMON common;
+  VP10EncoderConfig oxcf;
+  struct lookahead_ctx *lookahead;
+  struct lookahead_entry *alt_ref_source;
+
+  YV12_BUFFER_CONFIG *Source;
+  YV12_BUFFER_CONFIG *Last_Source;  // NULL for first frame and alt_ref frames
+  YV12_BUFFER_CONFIG *un_scaled_source;
+  YV12_BUFFER_CONFIG scaled_source;
+  YV12_BUFFER_CONFIG *unscaled_last_source;
+  YV12_BUFFER_CONFIG scaled_last_source;
+
+  // Up-sampled reference buffers
+  // NOTE(zoeliu): Sufficient space must be allocated for the up-sampled
+  // reference buffers: the up-sampled versions of all the possibly stored
+  // references plus the currently coded frame itself.
+  EncRefCntBuffer upsampled_ref_bufs[REF_FRAMES + 1];
+  int upsampled_ref_idx[REF_FRAMES + 1];
+
+  // For a still frame, this flag is set to 1 to skip partition search.
+  int partition_search_skippable_frame;
+
+  int scaled_ref_idx[TOTAL_REFS_PER_FRAME];
+#if CONFIG_EXT_REFS
+  int lst_fb_idxes[LAST_REF_FRAMES];
+#else
+  int lst_fb_idx;
+#endif  // CONFIG_EXT_REFS
+  int gld_fb_idx;
+#if CONFIG_EXT_REFS
+  int bwd_fb_idx;  // BWD_REF_FRAME
+#endif             // CONFIG_EXT_REFS
+  int alt_fb_idx;
+
+  int last_show_frame_buf_idx;  // last show frame buffer index
+
+  int refresh_last_frame;
+  int refresh_golden_frame;
+#if CONFIG_EXT_REFS
+  int refresh_bwd_ref_frame;
+#endif  // CONFIG_EXT_REFS
+  int refresh_alt_ref_frame;
+
+  int ext_refresh_frame_flags_pending;
+  int ext_refresh_last_frame;
+  int ext_refresh_golden_frame;
+  int ext_refresh_alt_ref_frame;
+
+  int ext_refresh_frame_context_pending;
+  int ext_refresh_frame_context;
+
+  YV12_BUFFER_CONFIG last_frame_uf;
+#if CONFIG_LOOP_RESTORATION
+  YV12_BUFFER_CONFIG last_frame_db;
+#endif  // CONFIG_LOOP_RESTORATION
+
+  // Ambient reconstruction err target for force key frames
+  int64_t ambient_err;
+
+  RD_OPT rd;
+
+  CODING_CONTEXT coding_context;
+
+#if CONFIG_REF_MV
+  int *nmv_costs[NMV_CONTEXTS][2];
+  int *nmv_costs_hp[NMV_CONTEXTS][2];
+#endif
+
+  int *nmvcosts[2];
+  int *nmvcosts_hp[2];
+  int *nmvsadcosts[2];
+  int *nmvsadcosts_hp[2];
+
+  int64_t last_time_stamp_seen;
+  int64_t last_end_time_stamp_seen;
+  int64_t first_time_stamp_ever;
+
+  RATE_CONTROL rc;
+  double framerate;
+
+  // NOTE(zoeliu): Any inter frame allows a maximum of REF_FRAMES inter
+  // references; including the currently coded frame itself, sufficient space
+  // must be allocated for the maximum possible number of frames.
+  int interp_filter_selected[REF_FRAMES + 1][SWITCHABLE];
+
+  struct vpx_codec_pkt_list *output_pkt_list;
+
+  MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
+  int mbgraph_n_frames;  // number of frames filled in the above
+  int static_mb_pct;     // % forced skip mbs by segmentation
+  int ref_frame_flags;
+
+  SPEED_FEATURES sf;
+
+  unsigned int max_mv_magnitude;
+  int mv_step_param;
+
+  int allow_comp_inter_inter;
+
+  // Default value is 1. From first pass stats, encode_breakout may be disabled.
+  ENCODE_BREAKOUT_TYPE allow_encode_breakout;
+
+  // Get threshold from external input. A suggested threshold is 800 for HD
+  // clips, and 300 for < HD clips.
+  int encode_breakout;
+
+  uint8_t *segmentation_map;
+
+  // segment threshold for encode breakout
+  int segment_encode_breakout[MAX_SEGMENTS];
+
+  CYCLIC_REFRESH *cyclic_refresh;
+  ActiveMap active_map;
+
+  fractional_mv_step_fp *find_fractional_mv_step;
+  vp10_full_search_fn_t full_search_sad;  // It is currently unused.
+  vp10_diamond_search_fn_t diamond_search_sad;
+  vpx_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
+  uint64_t time_receive_data;
+  uint64_t time_compress_data;
+  uint64_t time_pick_lpf;
+  uint64_t time_encode_sb_row;
+
+#if CONFIG_FP_MB_STATS
+  int use_fp_mb_stats;
+#endif
+
+  TWO_PASS twopass;
+
+  YV12_BUFFER_CONFIG alt_ref_buffer;
+
+#if CONFIG_INTERNAL_STATS
+  unsigned int mode_chosen_counts[MAX_MODES];
+
+  int count;
+  uint64_t total_sq_error;
+  uint64_t total_samples;
+  ImageStat psnr;
+
+  double total_blockiness;
+  double worst_blockiness;
+
+  int bytes;
+  double summed_quality;
+  double summed_weights;
+  unsigned int tot_recode_hits;
+  double worst_ssim;
+
+  ImageStat fastssim;
+  ImageStat psnrhvs;
+
+  int b_calculate_blockiness;
+  int b_calculate_consistency;
+
+  double total_inconsistency;
+  double worst_consistency;
+  Ssimv *ssim_vars;
+  Metrics metrics;
+#endif
+  int b_calculate_psnr;
+
+  int droppable;
+
+  int initial_width;
+  int initial_height;
+  int initial_mbs;  // Number of MBs in the full-size frame; to be used to
+                    // normalize the firstpass stats. This will differ from the
+                    // number of MBs in the current frame when the frame is
+                    // scaled.
+
+  // Store frame variance info in SOURCE_VAR_BASED_PARTITION search type.
+  diff *source_diff_var;
+  // The threshold used in SOURCE_VAR_BASED_PARTITION search type.
+  unsigned int source_var_thresh;
+  int frames_till_next_var_check;
+
+  int frame_flags;
+
+  search_site_config ss_cfg;
+
+  int mbmode_cost[BLOCK_SIZE_GROUPS][INTRA_MODES];
+#if CONFIG_REF_MV
+  int newmv_mode_cost[NEWMV_MODE_CONTEXTS][2];
+  int zeromv_mode_cost[ZEROMV_MODE_CONTEXTS][2];
+  int refmv_mode_cost[REFMV_MODE_CONTEXTS][2];
+  int drl_mode_cost0[DRL_MODE_CONTEXTS][2];
+#if CONFIG_EXT_INTER
+  int new2mv_mode_cost[2];
+#endif  // CONFIG_EXT_INTER
+#endif
+
+  unsigned int inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES];
+#if CONFIG_EXT_INTER
+  unsigned int
+      inter_compound_mode_cost[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
+  unsigned int interintra_mode_cost[BLOCK_SIZE_GROUPS][INTERINTRA_MODES];
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+  int motvar_cost[BLOCK_SIZES][MOTION_VARIATIONS];
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+  int intra_uv_mode_cost[INTRA_MODES][INTRA_MODES];
+  int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
+  int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
+#if CONFIG_EXT_PARTITION_TYPES
+  int partition_cost[PARTITION_CONTEXTS][EXT_PARTITION_TYPES];
+#else
+  int partition_cost[PARTITION_CONTEXTS][PARTITION_TYPES];
+#endif
+  int palette_y_size_cost[PALETTE_BLOCK_SIZES][PALETTE_SIZES];
+  int palette_uv_size_cost[PALETTE_BLOCK_SIZES][PALETTE_SIZES];
+  int palette_y_color_cost[PALETTE_MAX_SIZE -
+                           1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS];
+  int palette_uv_color_cost[PALETTE_MAX_SIZE -
+                            1][PALETTE_COLOR_CONTEXTS][PALETTE_COLORS];
+  int tx_size_cost[TX_SIZES - 1][TX_SIZE_CONTEXTS][TX_SIZES];
+#if CONFIG_EXT_TX
+  int inter_tx_type_costs[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES];
+  int intra_tx_type_costs[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
+                         [TX_TYPES];
+#else
+  int intra_tx_type_costs[EXT_TX_SIZES][TX_TYPES][TX_TYPES];
+  int inter_tx_type_costs[EXT_TX_SIZES][TX_TYPES];
+#endif  // CONFIG_EXT_TX
+#if CONFIG_EXT_INTRA
+  int intra_filter_cost[INTRA_FILTERS + 1][INTRA_FILTERS];
+#endif  // CONFIG_EXT_INTRA
+
+  int multi_arf_allowed;
+  int multi_arf_enabled;
+  int multi_arf_last_grp_enabled;
+
+  TileDataEnc *tile_data;
+  int allocated_tiles;  // Keep track of memory allocated for tiles.
+
+  TOKENEXTRA *tile_tok[MAX_TILE_ROWS][MAX_TILE_COLS];
+  unsigned int tok_count[MAX_TILE_ROWS][MAX_TILE_COLS];
+
+  TileBufferEnc tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS];
+
+  int resize_pending;
+  int resize_state;
+  int resize_scale_num;
+  int resize_scale_den;
+  int resize_avg_qp;
+  int resize_buffer_underflow;
+  int resize_count;
+
+  // VAR_BASED_PARTITION thresholds
+  // 0 - threshold_128x128;
+  // 1 - threshold_64x64;
+  // 2 - threshold_32x32;
+  // 3 - threshold_16x16;
+  // 4 - threshold_8x8;
+  int64_t vbp_thresholds[5];
+  int64_t vbp_threshold_minmax;
+  int64_t vbp_threshold_sad;
+  BLOCK_SIZE vbp_bsize_min;
+
+  // VARIANCE_AQ segment map refresh
+  int vaq_refresh;
+
+  // Multi-threading
+  int num_workers;
+  VPxWorker *workers;
+  struct EncWorkerData *tile_thr_data;
+  VP10LfSync lf_row_sync;
+#if CONFIG_ENTROPY
+  SUBFRAME_STATS subframe_stats;
+  // TODO(yaowu): minimize the size of count buffers
+  SUBFRAME_STATS wholeframe_stats;
+  vp10_coeff_stats branch_ct_buf[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES];
+#endif  // CONFIG_ENTROPY
+#if CONFIG_ANS
+  struct BufAnsCoder buf_ans;
+#endif
+#if CONFIG_EXT_REFS
+  int refresh_frame_mask;
+  int existing_fb_idx_to_show;
+  int is_arf_filter_off[MAX_EXT_ARFS + 1];
+  int num_extra_arfs;
+  int arf_map[MAX_EXT_ARFS + 1];
+#endif  // CONFIG_EXT_REFS
+#if CONFIG_GLOBAL_MOTION
+  int global_motion_used[TOTAL_REFS_PER_FRAME];
+#endif
+} VP10_COMP;
+
+void vp10_initialize_enc(void);
+
+struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+                                         BufferPool *const pool);
+void vp10_remove_compressor(VP10_COMP *cpi);
+
+void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
+
+// receive a frames worth of data. caller can assume that a copy of this
+// frame is made and not just a copy of the pointer..
+int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+                           YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+                           int64_t end_time_stamp);
+
+int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+                             size_t *size, uint8_t *dest, int64_t *time_stamp,
+                             int64_t *time_end, int flush);
+
+int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
+
+int vp10_get_last_show_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *frame);
+
+int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
+
+void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
+
+int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+                            YV12_BUFFER_CONFIG *sd);
+
+int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+                           YV12_BUFFER_CONFIG *sd);
+
+int vp10_update_entropy(VP10_COMP *cpi, int update);
+
+int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+
+int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+                           VPX_SCALING vert_mode);
+
+int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+                          unsigned int height);
+
+int vp10_get_quantizer(struct VP10_COMP *cpi);
+
+void vp10_full_to_model_counts(vp10_coeff_count_model *model_count,
+                               vp10_coeff_count *full_count);
+
+static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
+  return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
+         (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
+}
+
+static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
+                                        MV_REFERENCE_FRAME ref_frame) {
+#if CONFIG_EXT_REFS
+  if (ref_frame >= LAST_FRAME && ref_frame <= LAST3_FRAME)
+    return cpi->lst_fb_idxes[ref_frame - 1];
+#else
+  if (ref_frame == LAST_FRAME) return cpi->lst_fb_idx;
+#endif  // CONFIG_EXT_REFS
+  else if (ref_frame == GOLDEN_FRAME)
+    return cpi->gld_fb_idx;
+#if CONFIG_EXT_REFS
+  else if (ref_frame == BWDREF_FRAME)
+    return cpi->bwd_fb_idx;
+#endif  // CONFIG_EXT_REFS
+  else
+    return cpi->alt_fb_idx;
+}
+
+static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi,
+                                        MV_REFERENCE_FRAME ref_frame) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
+  return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : INVALID_IDX;
+}
+
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
+    VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+  return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
+                                : NULL;
+}
+
+static INLINE const YV12_BUFFER_CONFIG *get_upsampled_ref(
+    VP10_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
+  // Use up-sampled reference frames.
+  const int buf_idx =
+      cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)];
+  return &cpi->upsampled_ref_bufs[buf_idx].buf;
+}
+
+#if CONFIG_EXT_REFS
+static INLINE int enc_is_ref_frame_buf(VP10_COMP *cpi,
+                                       RefCntBuffer *frame_buf) {
+  MV_REFERENCE_FRAME ref_frame;
+  VP10_COMMON *const cm = &cpi->common;
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+    if (buf_idx == INVALID_IDX) continue;
+    if (frame_buf == &cm->buffer_pool->frame_bufs[buf_idx]) break;
+  }
+  return (ref_frame <= ALTREF_FRAME);
+}
+#endif  // CONFIG_EXT_REFS
+
+static INLINE unsigned int get_token_alloc(int mb_rows, int mb_cols) {
+  // TODO(JBB): double check we can't exceed this token count if we have a
+  // 32x32 transform crossing a boundary at a multiple of 16.
+  // mb_rows, cols are in units of 16 pixels. We assume 3 planes all at full
+  // resolution. We assume up to 1 token per pixel, and then allow
+  // a head room of 1 EOSB token per 8x8 block per plane.
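+  // For example, a 1920x1088 frame has 120 x 68 MBs, for
+  // 120 * 68 * (16 * 16 + 4) * 3 = 6,364,800 tokens.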
+  return mb_rows * mb_cols * (16 * 16 + 4) * 3;
+}
+
+// Get the allocated token size for a tile. It does the same calculation as in
+// the frame token allocation.
+static INLINE unsigned int allocated_tokens(TileInfo tile) {
+  int tile_mb_rows = (tile.mi_row_end - tile.mi_row_start + 1) >> 1;
+  int tile_mb_cols = (tile.mi_col_end - tile.mi_col_start + 1) >> 1;
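+  // mi units are 8 pixels, so (mi + 1) >> 1 converts a count in mi units to
+  // 16-pixel MB units, rounding up.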
+
+  return get_token_alloc(tile_mb_rows, tile_mb_cols);
+}
+
+void vp10_alloc_compressor_data(VP10_COMP *cpi);
+
+void vp10_scale_references(VP10_COMP *cpi);
+
+void vp10_update_reference_frames(VP10_COMP *cpi);
+
+void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
+
+YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+                                                YV12_BUFFER_CONFIG *unscaled,
+                                                YV12_BUFFER_CONFIG *scaled);
+
+YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+                                           YV12_BUFFER_CONFIG *unscaled,
+                                           YV12_BUFFER_CONFIG *scaled);
+
+void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags);
+
+static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
+  return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
+         cpi->oxcf.enable_auto_arf;
+}
+
+// TODO(zoeliu): To set up cpi->oxcf.enable_auto_brf
+#if 0 && CONFIG_EXT_REFS
+static INLINE int is_bwdref_enabled(const VP10_COMP *const cpi) {
+  // NOTE(zoeliu): The enabling of bi-predictive frames depends on the use of
+  //               alt_ref, and now will be off when the alt_ref interval is
+  //               not sufficiently large.
+  return is_altref_enabled(cpi) && cpi->oxcf.enable_auto_brf;
+}
+#endif  // CONFIG_EXT_REFS
+
+static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
+                                MV_REFERENCE_FRAME ref0,
+                                MV_REFERENCE_FRAME ref1) {
+  xd->block_refs[0] =
+      &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME : 0];
+  xd->block_refs[1] =
+      &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME : 0];
+}
+
+static INLINE int get_chessboard_index(const int frame_index) {
+  return frame_index & 0x1;
+}
+
+static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) {
+  return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL;
+}
+
+void vp10_new_framerate(VP10_COMP *cpi, double framerate);
+
+#define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl))
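+// For example, spatial layer 2 with 3 temporal layers at temporal layer 1
+// maps to index 2 * 3 + 1 = 7.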
+
+// Update up-sampled reference frame index.
+static INLINE void uref_cnt_fb(EncRefCntBuffer *ubufs, int *uidx,
+                               int new_uidx) {
+  const int ref_index = *uidx;
+
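+  // Release the reference held on the previous up-sampled buffer, then take
+  // a reference on the new one.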
+  if (ref_index >= 0 && ubufs[ref_index].ref_count > 0)
+    ubufs[ref_index].ref_count--;
+
+  *uidx = new_uidx;
+  ubufs[new_uidx].ref_count++;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_ENCODER_H_
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
new file mode 100644
index 0000000..63d716c
--- /dev/null
+++ b/av1/encoder/ethread.c
@@ -0,0 +1,171 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "av1/encoder/encodeframe.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/ethread.h"
+#include "aom_dsp/vpx_dsp_common.h"
+
+static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
+  int i, j, k, l, m, n;
+
+  for (i = 0; i < REFERENCE_MODES; i++)
+    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];
+
+  for (i = 0; i < TX_SIZES; i++)
+    for (j = 0; j < PLANE_TYPES; j++)
+      for (k = 0; k < REF_TYPES; k++)
+        for (l = 0; l < COEF_BANDS; l++)
+          for (m = 0; m < COEFF_CONTEXTS; m++)
+            for (n = 0; n < ENTROPY_TOKENS; n++)
+              td->rd_counts.coef_counts[i][j][k][l][m][n] +=
+                  td_t->rd_counts.coef_counts[i][j][k][l][m][n];
+
+  // Counts of all motion searches and exhaustive mesh searches.
+  td->rd_counts.m_search_count += td_t->rd_counts.m_search_count;
+  td->rd_counts.ex_search_count += td_t->rd_counts.ex_search_count;
+}
+
+static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
+  VP10_COMP *const cpi = thread_data->cpi;
+  const VP10_COMMON *const cm = &cpi->common;
+  const int tile_cols = cm->tile_cols;
+  const int tile_rows = cm->tile_rows;
+  int t;
+
+  (void)unused;
+
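+  // Tiles are assigned round-robin: worker i encodes tiles i,
+  // i + num_workers, i + 2 * num_workers, and so on.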
+  for (t = thread_data->start; t < tile_rows * tile_cols;
+       t += cpi->num_workers) {
+    int tile_row = t / tile_cols;
+    int tile_col = t % tile_cols;
+
+    vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+  }
+
+  return 0;
+}
+
+void vp10_encode_tiles_mt(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int tile_cols = cm->tile_cols;
+  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
+  int i;
+
+  vp10_init_tile_data(cpi);
+
+  // Only run once to create threads and allocate thread data.
+  if (cpi->num_workers == 0) {
+    CHECK_MEM_ERROR(cm, cpi->workers,
+                    vpx_malloc(num_workers * sizeof(*cpi->workers)));
+
+    CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
+                    vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
+
+    for (i = 0; i < num_workers; i++) {
+      VPxWorker *const worker = &cpi->workers[i];
+      EncWorkerData *const thread_data = &cpi->tile_thr_data[i];
+
+      ++cpi->num_workers;
+      winterface->init(worker);
+
+      thread_data->cpi = cpi;
+
+      if (i < num_workers - 1) {
+        // Allocate thread data.
+        CHECK_MEM_ERROR(cm, thread_data->td,
+                        vpx_memalign(32, sizeof(*thread_data->td)));
+        vp10_zero(*thread_data->td);
+
+        // Set up pc_tree.
+        thread_data->td->leaf_tree = NULL;
+        thread_data->td->pc_tree = NULL;
+        vp10_setup_pc_tree(cm, thread_data->td);
+
+        // Set up variance tree if needed.
+        if (cpi->sf.partition_search_type == VAR_BASED_PARTITION)
+          vp10_setup_var_tree(cm, &cpi->td);
+
+        // Allocate frame counters in thread data.
+        CHECK_MEM_ERROR(cm, thread_data->td->counts,
+                        vpx_calloc(1, sizeof(*thread_data->td->counts)));
+
+        // Create threads
+        if (!winterface->reset(worker))
+          vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+                             "Tile encoder thread creation failed");
+      } else {
+        // Main thread acts as a worker and uses the thread data in cpi.
+        thread_data->td = &cpi->td;
+      }
+
+      winterface->sync(worker);
+    }
+  }
+
+  for (i = 0; i < num_workers; i++) {
+    VPxWorker *const worker = &cpi->workers[i];
+    EncWorkerData *thread_data;
+
+    worker->hook = (VPxWorkerHook)enc_worker_hook;
+    worker->data1 = &cpi->tile_thr_data[i];
+    worker->data2 = NULL;
+    thread_data = (EncWorkerData *)worker->data1;
+
+    // Before encoding a frame, copy the thread data from cpi.
+    if (thread_data->td != &cpi->td) {
+      thread_data->td->mb = cpi->td.mb;
+      thread_data->td->rd_counts = cpi->td.rd_counts;
+    }
+    if (thread_data->td->counts != &cpi->common.counts) {
+      memcpy(thread_data->td->counts, &cpi->common.counts,
+             sizeof(cpi->common.counts));
+    }
+
+    // Allocate buffers used by palette coding mode.
+    if (cpi->common.allow_screen_content_tools && i < num_workers - 1) {
+      MACROBLOCK *x = &thread_data->td->mb;
+      CHECK_MEM_ERROR(cm, x->palette_buffer,
+                      vpx_memalign(16, sizeof(*x->palette_buffer)));
+    }
+  }
+
+  // Encode a frame
+  for (i = 0; i < num_workers; i++) {
+    VPxWorker *const worker = &cpi->workers[i];
+    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
+
+    // Set the starting tile for each thread.
+    thread_data->start = i;
+
+    if (i == cpi->num_workers - 1)
+      winterface->execute(worker);
+    else
+      winterface->launch(worker);
+  }
+
+  // Encoding ends.
+  for (i = 0; i < num_workers; i++) {
+    VPxWorker *const worker = &cpi->workers[i];
+    winterface->sync(worker);
+  }
+
+  for (i = 0; i < num_workers; i++) {
+    VPxWorker *const worker = &cpi->workers[i];
+    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
+
+    // Accumulate counters.
+    if (i < cpi->num_workers - 1) {
+      vp10_accumulate_frame_counts(cm, thread_data->td->counts);
+      accumulate_rd_opt(&cpi->td, thread_data->td);
+    }
+  }
+}
diff --git a/av1/encoder/ethread.h b/av1/encoder/ethread.h
new file mode 100644
index 0000000..d72816cd
--- /dev/null
+++ b/av1/encoder/ethread.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_ETHREAD_H_
+#define VP10_ENCODER_ETHREAD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP10_COMP;
+struct ThreadData;
+
+typedef struct EncWorkerData {
+  struct VP10_COMP *cpi;
+  struct ThreadData *td;
+  int start;
+} EncWorkerData;
+
+void vp10_encode_tiles_mt(struct VP10_COMP *cpi);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_ETHREAD_H_
diff --git a/av1/encoder/extend.c b/av1/encoder/extend.c
new file mode 100644
index 0000000..1b0c442
--- /dev/null
+++ b/av1/encoder/extend.c
@@ -0,0 +1,191 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/common.h"
+#include "av1/encoder/extend.h"
+
+static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
+                                  uint8_t *dst, int dst_pitch, int w, int h,
+                                  int extend_top, int extend_left,
+                                  int extend_bottom, int extend_right) {
+  int i, linesize;
+
+  // Copy the left-most and right-most columns out.
+  const uint8_t *src_ptr1 = src;
+  const uint8_t *src_ptr2 = src + w - 1;
+  uint8_t *dst_ptr1 = dst - extend_left;
+  uint8_t *dst_ptr2 = dst + w;
+
+  for (i = 0; i < h; i++) {
+    memset(dst_ptr1, src_ptr1[0], extend_left);
+    memcpy(dst_ptr1 + extend_left, src_ptr1, w);
+    memset(dst_ptr2, src_ptr2[0], extend_right);
+    src_ptr1 += src_pitch;
+    src_ptr2 += src_pitch;
+    dst_ptr1 += dst_pitch;
+    dst_ptr2 += dst_pitch;
+  }
+
+  // Now copy the top and bottom lines into each line of the respective
+  // borders
+  src_ptr1 = dst - extend_left;
+  src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
+  dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
+  dst_ptr2 = dst + dst_pitch * h - extend_left;
+  linesize = extend_left + extend_right + w;
+
+  for (i = 0; i < extend_top; i++) {
+    memcpy(dst_ptr1, src_ptr1, linesize);
+    dst_ptr1 += dst_pitch;
+  }
+
+  for (i = 0; i < extend_bottom; i++) {
+    memcpy(dst_ptr2, src_ptr2, linesize);
+    dst_ptr2 += dst_pitch;
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
+                                         uint8_t *dst8, int dst_pitch, int w,
+                                         int h, int extend_top, int extend_left,
+                                         int extend_bottom, int extend_right) {
+  int i, linesize;
+  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
+
+  // Copy the left-most and right-most columns out.
+  const uint16_t *src_ptr1 = src;
+  const uint16_t *src_ptr2 = src + w - 1;
+  uint16_t *dst_ptr1 = dst - extend_left;
+  uint16_t *dst_ptr2 = dst + w;
+
+  for (i = 0; i < h; i++) {
+    vpx_memset16(dst_ptr1, src_ptr1[0], extend_left);
+    memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(src_ptr1[0]));
+    vpx_memset16(dst_ptr2, src_ptr2[0], extend_right);
+    src_ptr1 += src_pitch;
+    src_ptr2 += src_pitch;
+    dst_ptr1 += dst_pitch;
+    dst_ptr2 += dst_pitch;
+  }
+
+  // Now copy the top and bottom lines into each line of the respective
+  // borders
+  src_ptr1 = dst - extend_left;
+  src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
+  dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
+  dst_ptr2 = dst + dst_pitch * h - extend_left;
+  linesize = extend_left + extend_right + w;
+
+  for (i = 0; i < extend_top; i++) {
+    memcpy(dst_ptr1, src_ptr1, linesize * sizeof(src_ptr1[0]));
+    dst_ptr1 += dst_pitch;
+  }
+
+  for (i = 0; i < extend_bottom; i++) {
+    memcpy(dst_ptr2, src_ptr2, linesize * sizeof(src_ptr2[0]));
+    dst_ptr2 += dst_pitch;
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                                YV12_BUFFER_CONFIG *dst) {
+  // Extend src frame in buffer
+  // Altref filtering assumes 16 pixel extension
+  const int et_y = 16;
+  const int el_y = 16;
+  // Motion estimation may use src block variance with the block size up
+  // to 64x64, so the right and bottom need to be extended to 64 multiple
+  // or up to 16, whichever is greater.
+  const int er_y =
+      VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) -
+      src->y_crop_width;
+  const int eb_y =
+      VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) -
+      src->y_crop_height;
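+  // E.g. (illustrative, assuming y_height == y_crop_height == 1080):
+  // ALIGN_POWER_OF_TWO(1080, 6) = 1088 and 1080 + 16 = 1096, so
+  // eb_y = VPXMAX(1096, 1088) - 1080 = 16.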
+  const int uv_width_subsampling = (src->uv_width != src->y_width);
+  const int uv_height_subsampling = (src->uv_height != src->y_height);
+  const int et_uv = et_y >> uv_height_subsampling;
+  const int el_uv = el_y >> uv_width_subsampling;
+  const int eb_uv = eb_y >> uv_height_subsampling;
+  const int er_uv = er_y >> uv_width_subsampling;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
+    highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+                                 dst->y_stride, src->y_crop_width,
+                                 src->y_crop_height, et_y, el_y, eb_y, er_y);
+
+    highbd_copy_and_extend_plane(
+        src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+        src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
+
+    highbd_copy_and_extend_plane(
+        src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+        src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+                        dst->y_stride, src->y_crop_width, src->y_crop_height,
+                        et_y, el_y, eb_y, er_y);
+
+  copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer,
+                        dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
+                        et_uv, el_uv, eb_uv, er_uv);
+
+  copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer,
+                        dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
+                        et_uv, el_uv, eb_uv, er_uv);
+}
+
+void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+                                          YV12_BUFFER_CONFIG *dst, int srcy,
+                                          int srcx, int srch, int srcw) {
+  // If the side is not touching the frame border then don't extend.
+  const int et_y = srcy ? 0 : dst->border;
+  const int el_y = srcx ? 0 : dst->border;
+  const int eb_y = srcy + srch != src->y_height
+                       ? 0
+                       : dst->border + dst->y_height - src->y_height;
+  const int er_y = srcx + srcw != src->y_width
+                       ? 0
+                       : dst->border + dst->y_width - src->y_width;
+  const int src_y_offset = srcy * src->y_stride + srcx;
+  const int dst_y_offset = srcy * dst->y_stride + srcx;
+
+  const int et_uv = ROUND_POWER_OF_TWO(et_y, 1);
+  const int el_uv = ROUND_POWER_OF_TWO(el_y, 1);
+  const int eb_uv = ROUND_POWER_OF_TWO(eb_y, 1);
+  const int er_uv = ROUND_POWER_OF_TWO(er_y, 1);
+  const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
+  const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
+  const int srch_uv = ROUND_POWER_OF_TWO(srch, 1);
+  const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
+
+  copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
+                        dst->y_buffer + dst_y_offset, dst->y_stride, srcw, srch,
+                        et_y, el_y, eb_y, er_y);
+
+  copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
+                        dst->u_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+                        srch_uv, et_uv, el_uv, eb_uv, er_uv);
+
+  copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
+                        dst->v_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+                        srch_uv, et_uv, el_uv, eb_uv, er_uv);
+}
diff --git a/av1/encoder/extend.h b/av1/encoder/extend.h
new file mode 100644
index 0000000..1ad763e
--- /dev/null
+++ b/av1/encoder/extend.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_EXTEND_H_
+#define VP10_ENCODER_EXTEND_H_
+
+#include "aom_scale/yv12config.h"
+#include "aom/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+                                YV12_BUFFER_CONFIG *dst);
+
+void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+                                          YV12_BUFFER_CONFIG *dst, int srcy,
+                                          int srcx, int srch, int srcw);
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_EXTEND_H_
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
new file mode 100644
index 0000000..b23b839
--- /dev/null
+++ b/av1/encoder/firstpass.c
@@ -0,0 +1,2924 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "./vpx_scale_rtcd.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+#include "aom_scale/vpx_scale.h"
+#include "aom_scale/yv12config.h"
+
+#include "av1/common/entropymv.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/reconinter.h"  // vp10_setup_dst_planes()
+#include "av1/encoder/aq_variance.h"
+#include "av1/encoder/block.h"
+#include "av1/encoder/encodeframe.h"
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/extend.h"
+#include "av1/encoder/firstpass.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/rd.h"
+#include "aom_dsp/variance.h"
+
+#define OUTPUT_FPF 0
+#define ARF_STATS_OUTPUT 0
+
+#define GROUP_ADAPTIVE_MAXQ 1
+
+#define BOOST_BREAKOUT 12.5
+#define BOOST_FACTOR 12.5
+#define FACTOR_PT_LOW 0.70
+#define FACTOR_PT_HIGH 0.90
+#define FIRST_PASS_Q 10.0
+#define GF_MAX_BOOST 96.0
+#define INTRA_MODE_PENALTY 1024
+#define KF_MAX_BOOST 128.0
+#define MIN_ARF_GF_BOOST 240
+#define MIN_DECAY_FACTOR 0.01
+#define MIN_KF_BOOST 300
+#define NEW_MV_MODE_PENALTY 32
+#define DARK_THRESH 64
+#define DEFAULT_GRP_WEIGHT 1.0
+#define RC_FACTOR_MIN 0.75
+#define RC_FACTOR_MAX 1.75
+
+#define NCOUNT_INTRA_THRESH 8192
+#define NCOUNT_INTRA_FACTOR 3
+#define NCOUNT_FRAME_II_THRESH 5.0
+
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
+
+#if ARF_STATS_OUTPUT
+unsigned int arf_count = 0;
+#endif
+
+// Resets the first pass stats read position to the given, previously saved,
+// position.
+static void reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) {
+  p->stats_in = position;
+}
+
+// Read frame stats at an offset from the current position.
+static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, int offset) {
+  if ((offset >= 0 && p->stats_in + offset >= p->stats_in_end) ||
+      (offset < 0 && p->stats_in + offset < p->stats_in_start)) {
+    return NULL;
+  }
+
+  return &p->stats_in[offset];
+}
+
+static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) {
+  if (p->stats_in >= p->stats_in_end) return EOF;
+
+  *fps = *p->stats_in;
+  ++p->stats_in;
+  return 1;
+}
+
+static void output_stats(FIRSTPASS_STATS *stats,
+                         struct vpx_codec_pkt_list *pktlist) {
+  struct vpx_codec_cx_pkt pkt;
+  pkt.kind = VPX_CODEC_STATS_PKT;
+  pkt.data.twopass_stats.buf = stats;
+  pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS);
+  vpx_codec_pkt_list_add(pktlist, &pkt);
+
+// TEMP debug code
+#if OUTPUT_FPF
+  {
+    FILE *fpfile;
+    fpfile = fopen("firstpass.stt", "a");
+
+    fprintf(fpfile,
+            "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf"
+            "%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf"
+            "%12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf\n",
+            stats->frame, stats->weight, stats->intra_error, stats->coded_error,
+            stats->sr_coded_error, stats->pcnt_inter, stats->pcnt_motion,
+            stats->pcnt_second_ref, stats->pcnt_neutral, stats->intra_skip_pct,
+            stats->inactive_zone_rows, stats->inactive_zone_cols, stats->MVr,
+            stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv,
+            stats->MVcv, stats->mv_in_out_count, stats->new_mv_count,
+            stats->count, stats->duration);
+    fclose(fpfile);
+  }
+#endif
+}
+
+#if CONFIG_FP_MB_STATS
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
+                              struct vpx_codec_pkt_list *pktlist) {
+  struct vpx_codec_cx_pkt pkt;
+  pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
+  pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats;
+  pkt.data.firstpass_mb_stats.sz = cm->initial_mbs * sizeof(uint8_t);
+  vpx_codec_pkt_list_add(pktlist, &pkt);
+}
+#endif
+
+static void zero_stats(FIRSTPASS_STATS *section) {
+  section->frame = 0.0;
+  section->weight = 0.0;
+  section->intra_error = 0.0;
+  section->coded_error = 0.0;
+  section->sr_coded_error = 0.0;
+  section->pcnt_inter = 0.0;
+  section->pcnt_motion = 0.0;
+  section->pcnt_second_ref = 0.0;
+  section->pcnt_neutral = 0.0;
+  section->intra_skip_pct = 0.0;
+  section->inactive_zone_rows = 0.0;
+  section->inactive_zone_cols = 0.0;
+  section->MVr = 0.0;
+  section->mvr_abs = 0.0;
+  section->MVc = 0.0;
+  section->mvc_abs = 0.0;
+  section->MVrv = 0.0;
+  section->MVcv = 0.0;
+  section->mv_in_out_count = 0.0;
+  section->new_mv_count = 0.0;
+  section->count = 0.0;
+  section->duration = 1.0;
+}
+
+static void accumulate_stats(FIRSTPASS_STATS *section,
+                             const FIRSTPASS_STATS *frame) {
+  section->frame += frame->frame;
+  section->weight += frame->weight;
+  section->intra_error += frame->intra_error;
+  section->coded_error += frame->coded_error;
+  section->sr_coded_error += frame->sr_coded_error;
+  section->pcnt_inter += frame->pcnt_inter;
+  section->pcnt_motion += frame->pcnt_motion;
+  section->pcnt_second_ref += frame->pcnt_second_ref;
+  section->pcnt_neutral += frame->pcnt_neutral;
+  section->intra_skip_pct += frame->intra_skip_pct;
+  section->inactive_zone_rows += frame->inactive_zone_rows;
+  section->inactive_zone_cols += frame->inactive_zone_cols;
+  section->MVr += frame->MVr;
+  section->mvr_abs += frame->mvr_abs;
+  section->MVc += frame->MVc;
+  section->mvc_abs += frame->mvc_abs;
+  section->MVrv += frame->MVrv;
+  section->MVcv += frame->MVcv;
+  section->mv_in_out_count += frame->mv_in_out_count;
+  section->new_mv_count += frame->new_mv_count;
+  section->count += frame->count;
+  section->duration += frame->duration;
+}
+
+static void subtract_stats(FIRSTPASS_STATS *section,
+                           const FIRSTPASS_STATS *frame) {
+  section->frame -= frame->frame;
+  section->weight -= frame->weight;
+  section->intra_error -= frame->intra_error;
+  section->coded_error -= frame->coded_error;
+  section->sr_coded_error -= frame->sr_coded_error;
+  section->pcnt_inter -= frame->pcnt_inter;
+  section->pcnt_motion -= frame->pcnt_motion;
+  section->pcnt_second_ref -= frame->pcnt_second_ref;
+  section->pcnt_neutral -= frame->pcnt_neutral;
+  section->intra_skip_pct -= frame->intra_skip_pct;
+  section->inactive_zone_rows -= frame->inactive_zone_rows;
+  section->inactive_zone_cols -= frame->inactive_zone_cols;
+  section->MVr -= frame->MVr;
+  section->mvr_abs -= frame->mvr_abs;
+  section->MVc -= frame->MVc;
+  section->mvc_abs -= frame->mvc_abs;
+  section->MVrv -= frame->MVrv;
+  section->MVcv -= frame->MVcv;
+  section->mv_in_out_count -= frame->mv_in_out_count;
+  section->new_mv_count -= frame->new_mv_count;
+  section->count -= frame->count;
+  section->duration -= frame->duration;
+}
+
+// Calculate the linear size relative to a baseline of 1080P
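+// (e.g. a 1280x720 frame gives sqrt(921600 / 2073600) ~ 0.667, while a
+// 1920x1080 frame gives exactly 1.0).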
+#define BASE_SIZE 2073600.0  // 1920x1080
+static double get_linear_size_factor(const VP10_COMP *cpi) {
+  const double this_area = cpi->initial_width * cpi->initial_height;
+  return pow(this_area / BASE_SIZE, 0.5);
+}
+
+// Calculate an active area of the image that discounts formatting
+// bars and partially discounts other 0 energy areas.
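+// E.g. (illustrative) a frame with 20% of MBs flagged as intra skip and no
+// inactive zone rows gives active_pct = 1.0 - 0.2 / 2 = 0.9 before clamping.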
+#define MIN_ACTIVE_AREA 0.5
+#define MAX_ACTIVE_AREA 1.0
+static double calculate_active_area(const VP10_COMP *cpi,
+                                    const FIRSTPASS_STATS *this_frame) {
+  double active_pct;
+
+  active_pct =
+      1.0 -
+      ((this_frame->intra_skip_pct / 2) +
+       ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
+  return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
+}
+
+// Calculate a modified error used in distributing bits between easier and
+// harder frames.
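+// E.g. with a two pass vbr bias setting of 50, a frame with twice the
+// average coded error (and weight ~1.0) gets roughly av_err * 2^0.5 ~ 1.41x
+// the average allocation, before the active area correction and clamping.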
+#define ACT_AREA_CORRECTION 0.5
+static double calculate_modified_err(const VP10_COMP *cpi,
+                                     const TWO_PASS *twopass,
+                                     const VP10EncoderConfig *oxcf,
+                                     const FIRSTPASS_STATS *this_frame) {
+  const FIRSTPASS_STATS *const stats = &twopass->total_stats;
+  const double av_weight = stats->weight / stats->count;
+  const double av_err = (stats->coded_error * av_weight) / stats->count;
+  double modified_error =
+      av_err * pow(this_frame->coded_error * this_frame->weight /
+                       DOUBLE_DIVIDE_CHECK(av_err),
+                   oxcf->two_pass_vbrbias / 100.0);
+
+  // Correction for active area. Frames with a reduced active area
+  // (eg due to formatting bars) have a higher error per mb for the
+  // remaining active MBs. The correction here assumes that coding
+  // 0.5N blocks of complexity 2X is a little easier than coding N
+  // blocks of complexity X.
+  modified_error *=
+      pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
+
+  return fclamp(modified_error, twopass->modified_error_min,
+                twopass->modified_error_max);
+}
+
+// This function returns the maximum target rate per frame.
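+// E.g. (illustrative) with avg_frame_bandwidth = 40000 bits and
+// two_pass_vbrmax_section = 400 (i.e. 400%), the cap is 160000 bits,
+// further limited by rc->max_frame_bandwidth.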
+static int frame_max_bits(const RATE_CONTROL *rc,
+                          const VP10EncoderConfig *oxcf) {
+  int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
+                      (int64_t)oxcf->two_pass_vbrmax_section) /
+                     100;
+  if (max_bits < 0)
+    max_bits = 0;
+  else if (max_bits > rc->max_frame_bandwidth)
+    max_bits = rc->max_frame_bandwidth;
+
+  return (int)max_bits;
+}
+
+void vp10_init_first_pass(VP10_COMP *cpi) {
+  zero_stats(&cpi->twopass.total_stats);
+}
+
+void vp10_end_first_pass(VP10_COMP *cpi) {
+  output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
+}
+
+static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
+  switch (bsize) {
+    case BLOCK_8X8: return vpx_mse8x8;
+    case BLOCK_16X8: return vpx_mse16x8;
+    case BLOCK_8X16: return vpx_mse8x16;
+    default: return vpx_mse16x16;
+  }
+}
+
+static unsigned int get_prediction_error(BLOCK_SIZE bsize,
+                                         const struct buf_2d *src,
+                                         const struct buf_2d *ref) {
+  unsigned int sse;
+  const vpx_variance_fn_t fn = get_block_variance_fn(bsize);
+  fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
+  return sse;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
+                                                      int bd) {
+  switch (bd) {
+    default:
+      switch (bsize) {
+        case BLOCK_8X8: return vpx_highbd_8_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_8_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_8_mse8x16;
+        default: return vpx_highbd_8_mse16x16;
+      }
+      break;
+    case 10:
+      switch (bsize) {
+        case BLOCK_8X8: return vpx_highbd_10_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_10_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_10_mse8x16;
+        default: return vpx_highbd_10_mse16x16;
+      }
+      break;
+    case 12:
+      switch (bsize) {
+        case BLOCK_8X8: return vpx_highbd_12_mse8x8;
+        case BLOCK_16X8: return vpx_highbd_12_mse16x8;
+        case BLOCK_8X16: return vpx_highbd_12_mse8x16;
+        default: return vpx_highbd_12_mse16x16;
+      }
+      break;
+  }
+}
+
+static unsigned int highbd_get_prediction_error(BLOCK_SIZE bsize,
+                                                const struct buf_2d *src,
+                                                const struct buf_2d *ref,
+                                                int bd) {
+  unsigned int sse;
+  const vpx_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd);
+  fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
+  return sse;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+// Refine the motion search range according to the frame dimension
+// for first pass test.
+static int get_search_range(const VP10_COMP *cpi) {
+  int sr = 0;
+  const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
+
+  while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
+  return sr;
+}
+
+static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+                                     const MV *ref_mv, MV *best_mv,
+                                     int *best_motion_err) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MV tmp_mv = { 0, 0 };
+  MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
+  int num00, tmp_err, n;
+  const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
+  vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
+  const int new_mv_mode_penalty = NEW_MV_MODE_PENALTY;
+
+  int step_param = 3;
+  int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
+  const int sr = get_search_range(cpi);
+  step_param += sr;
+  further_steps -= sr;
+
+  // Override the default variance function to use MSE.
+  v_fn_ptr.vf = get_block_variance_fn(bsize);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd);
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // Center the initial step/diamond search on best mv.
+  tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
+                                    step_param, x->sadperbit16, &num00,
+                                    &v_fn_ptr, ref_mv);
+  if (tmp_err < INT_MAX)
+    tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+  if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
+
+  if (tmp_err < *best_motion_err) {
+    *best_motion_err = tmp_err;
+    *best_mv = tmp_mv;
+  }
+
+  // Carry out further step/diamond searches as necessary.
+  n = num00;
+  num00 = 0;
+
+  while (n < further_steps) {
+    ++n;
+
+    if (num00) {
+      --num00;
+    } else {
+      tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
+                                        step_param + n, x->sadperbit16, &num00,
+                                        &v_fn_ptr, ref_mv);
+      if (tmp_err < INT_MAX)
+        tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+      if (tmp_err < INT_MAX - new_mv_mode_penalty)
+        tmp_err += new_mv_mode_penalty;
+
+      if (tmp_err < *best_motion_err) {
+        *best_motion_err = tmp_err;
+        *best_mv = tmp_mv;
+      }
+    }
+  }
+}
+
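+// The first pass operates on 16x16 macroblocks. Each MB spans two 8x8 mi
+// units, so at the right/bottom frame edge, where only one mi unit remains,
+// a narrower or shorter block size (16x8, 8x16 or 8x8) is used instead.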
+static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
+  if (2 * mb_col + 1 < cm->mi_cols) {
+    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
+  } else {
+    return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 : BLOCK_8X8;
+  }
+}
+
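+// Returns the smallest qindex whose real Q is at least FIRST_PASS_Q, so the
+// first pass is coded at an approximately fixed quantizer.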
+static int find_fp_qindex(vpx_bit_depth_t bit_depth) {
+  int i;
+
+  for (i = 0; i < QINDEX_RANGE; ++i)
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
+
+  if (i == QINDEX_RANGE) i--;
+
+  return i;
+}
+
+static void set_first_pass_params(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  if (!cpi->refresh_alt_ref_frame &&
+      (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
+    cm->frame_type = KEY_FRAME;
+  } else {
+    cm->frame_type = INTER_FRAME;
+  }
+  // Do not use periodic key frames.
+  cpi->rc.frames_to_key = INT_MAX;
+}
+
+#define UL_INTRA_THRESH 50
+#define INVALID_ROW -1
+void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
+  int mb_row, mb_col;
+  MACROBLOCK *const x = &cpi->td.mb;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  TileInfo tile;
+  struct macroblock_plane *const p = x->plane;
+  struct macroblockd_plane *const pd = xd->plane;
+  const PICK_MODE_CONTEXT *ctx =
+      &cpi->td.pc_root[MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2]->none;
+  int i;
+
+  int recon_yoffset, recon_uvoffset;
+  int64_t intra_error = 0;
+  int64_t coded_error = 0;
+  int64_t sr_coded_error = 0;
+
+  int sum_mvr = 0, sum_mvc = 0;
+  int sum_mvr_abs = 0, sum_mvc_abs = 0;
+  int64_t sum_mvrs = 0, sum_mvcs = 0;
+  int mvcount = 0;
+  int intercount = 0;
+  int second_ref_count = 0;
+  const int intrapenalty = INTRA_MODE_PENALTY;
+  double neutral_count;
+  int intra_skip_count = 0;
+  int image_data_start_row = INVALID_ROW;
+  int new_mv_count = 0;
+  int sum_in_vectors = 0;
+  MV lastmv = { 0, 0 };
+  TWO_PASS *twopass = &cpi->twopass;
+  const MV zero_mv = { 0, 0 };
+  int recon_y_stride, recon_uv_stride, uv_mb_height;
+
+  YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
+  YV12_BUFFER_CONFIG *gld_yv12 = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
+  YV12_BUFFER_CONFIG *const new_yv12 = get_frame_new_buffer(cm);
+  const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12;
+  double intra_factor;
+  double brightness_factor;
+  BufferPool *const pool = cm->buffer_pool;
+
+  // First pass code requires valid last and new frame buffers.
+  assert(new_yv12 != NULL);
+  assert(frame_is_intra_only(cm) || (lst_yv12 != NULL));
+
+#if CONFIG_FP_MB_STATS
+  if (cpi->use_fp_mb_stats) {
+    vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
+  }
+#endif
+
+  vpx_clear_system_state();
+
+  intra_factor = 0.0;
+  brightness_factor = 0.0;
+  neutral_count = 0.0;
+
+  set_first_pass_params(cpi);
+  vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
+
+  vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+
+  vp10_setup_src_planes(x, cpi->Source, 0, 0);
+  vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0);
+
+  if (!frame_is_intra_only(cm)) {
+    vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
+  }
+
+  xd->mi = cm->mi_grid_visible;
+  xd->mi[0] = cm->mi;
+
+  vp10_frame_init_quantizer(cpi);
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+    p[i].coeff = ctx->coeff[i][1];
+    p[i].qcoeff = ctx->qcoeff[i][1];
+    pd[i].dqcoeff = ctx->dqcoeff[i][1];
+    p[i].eobs = ctx->eobs[i][1];
+  }
+
+  vp10_init_mv_probs(cm);
+  vp10_initialize_rd_consts(cpi);
+
+  // Tiling is ignored in the first pass.
+  vp10_tile_init(&tile, cm, 0, 0);
+
+  recon_y_stride = new_yv12->y_stride;
+  recon_uv_stride = new_yv12->uv_stride;
+  uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
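+  // uv_mb_height is 16 when chroma has full vertical resolution and 8 when
+  // chroma is vertically subsampled (e.g. 4:2:0).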
+
+  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
+    MV best_ref_mv = { 0, 0 };
+
+    // Reset above block coeffs.
+    xd->up_available = (mb_row != 0);
+    recon_yoffset = (mb_row * recon_y_stride * 16);
+    recon_uvoffset = (mb_row * recon_uv_stride * uv_mb_height);
+
+    // Set up limit values for motion vectors to prevent them extending
+    // outside the UMV borders.
+    x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16);
+    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16;
+
+    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
+      int this_error;
+      const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
+      const BLOCK_SIZE bsize = get_bsize(cm, mb_row, mb_col);
+      double log_intra;
+      int level_sample;
+
+#if CONFIG_FP_MB_STATS
+      const int mb_index = mb_row * cm->mb_cols + mb_col;
+#endif
+
+      vpx_clear_system_state();
+
+      xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
+      xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
+      xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset;
+      xd->left_available = (mb_col != 0);
+      xd->mi[0]->mbmi.sb_type = bsize;
+      xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
+      set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize],
+                     mb_col << 1, num_8x8_blocks_wide_lookup[bsize],
+                     cm->mi_rows, cm->mi_cols);
+
+      // Do intra 16x16 prediction.
+      xd->mi[0]->mbmi.segment_id = 0;
+#if CONFIG_SUPERTX
+      xd->mi[0]->mbmi.segment_id_supertx = 0;
+#endif  // CONFIG_SUPERTX
+      xd->mi[0]->mbmi.mode = DC_PRED;
+      xd->mi[0]->mbmi.tx_size =
+          use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
+      vp10_encode_intra_block_plane(x, bsize, 0, 0);
+      this_error = vpx_get_mb_ss(x->plane[0].src_diff);
+
+      // Keep a record of blocks that have almost no intra error residual
+      // (i.e. are in effect completely flat and untextured in the intra
+      // domain). In natural videos this is uncommon, but it is much more
+      // common in animations, graphics and screen content, so may be used
+      // as a signal to detect these types of content.
+      if (this_error < UL_INTRA_THRESH) {
+        ++intra_skip_count;
+      } else if ((mb_col > 0) && (image_data_start_row == INVALID_ROW)) {
+        image_data_start_row = mb_row;
+      }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cm->use_highbitdepth) {
+        switch (cm->bit_depth) {
+          case VPX_BITS_8: break;
+          case VPX_BITS_10: this_error >>= 4; break;
+          case VPX_BITS_12: this_error >>= 8; break;
+          default:
+            assert(0 &&
+                   "cm->bit_depth should be VPX_BITS_8, "
+                   "VPX_BITS_10 or VPX_BITS_12");
+            return;
+        }
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+      vpx_clear_system_state();
+      log_intra = log(this_error + 1.0);
+      if (log_intra < 10.0)
+        intra_factor += 1.0 + ((10.0 - log_intra) * 0.05);
+      else
+        intra_factor += 1.0;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cm->use_highbitdepth)
+        level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0];
+      else
+        level_sample = x->plane[0].src.buf[0];
+#else
+      level_sample = x->plane[0].src.buf[0];
+#endif
+      if ((level_sample < DARK_THRESH) && (log_intra < 9.0))
+        brightness_factor += 1.0 + (0.01 * (DARK_THRESH - level_sample));
+      else
+        brightness_factor += 1.0;
+
+      // Intrapenalty below deals with situations where the intra and inter
+      // error scores are very low (e.g. a plain black frame).
+      // We do not have special cases in first pass for 0,0 and nearest etc so
+      // all inter modes carry an overhead cost estimate for the mv.
+      // When the error score is very low this causes us to pick all or lots of
+      // INTRA modes and throw lots of key frames.
+      // This penalty adds a cost matching that of a 0,0 mv to the intra case.
+      this_error += intrapenalty;
+
+      // Accumulate the intra error.
+      intra_error += (int64_t)this_error;
+
+#if CONFIG_FP_MB_STATS
+      if (cpi->use_fp_mb_stats) {
+        // initialization
+        cpi->twopass.frame_mb_stats_buf[mb_index] = 0;
+      }
+#endif
+
+      // Set up limit values for motion vectors to prevent them extending
+      // outside the UMV borders.
+      x->mv_col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16);
+      x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16;
+
+      // Other than for the first frame do a motion search.
+      if (cm->current_video_frame > 0) {
+        int tmp_err, motion_error, raw_motion_error;
+        // Assume 0,0 motion with no mv overhead.
+        MV mv = { 0, 0 }, tmp_mv = { 0, 0 };
+        struct buf_2d unscaled_last_source_buf_2d;
+
+        xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+          motion_error = highbd_get_prediction_error(
+              bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
+        } else {
+          motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                              &xd->plane[0].pre[0]);
+        }
+#else
+        motion_error =
+            get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+        // Compute the motion error of the 0,0 motion using the last source
+        // frame as the reference. Skip the further motion search on
+        // reconstructed frame if this error is small.
+        unscaled_last_source_buf_2d.buf =
+            cpi->unscaled_last_source->y_buffer + recon_yoffset;
+        unscaled_last_source_buf_2d.stride =
+            cpi->unscaled_last_source->y_stride;
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+          raw_motion_error = highbd_get_prediction_error(
+              bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
+        } else {
+          raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                  &unscaled_last_source_buf_2d);
+        }
+#else
+        raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                &unscaled_last_source_buf_2d);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+        // TODO(pengchong): Replace the hard-coded threshold
+        if (raw_motion_error > 25) {
+          // Test last reference frame using the previous best mv as the
+          // starting point (best reference) for the search.
+          first_pass_motion_search(cpi, x, &best_ref_mv, &mv, &motion_error);
+
+          // If the current best reference mv is not centered on 0,0 then do a
+          // 0,0 based search as well.
+          if (!is_zero_mv(&best_ref_mv)) {
+            tmp_err = INT_MAX;
+            first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv, &tmp_err);
+
+            if (tmp_err < motion_error) {
+              motion_error = tmp_err;
+              mv = tmp_mv;
+            }
+          }
+
+          // Search in an older reference frame.
+          if ((cm->current_video_frame > 1) && gld_yv12 != NULL) {
+            // Assume 0,0 motion with no mv overhead.
+            int gf_motion_error;
+
+            xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
+#if CONFIG_VP9_HIGHBITDEPTH
+            if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+              gf_motion_error = highbd_get_prediction_error(
+                  bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
+            } else {
+              gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                     &xd->plane[0].pre[0]);
+            }
+#else
+            gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+                                                   &xd->plane[0].pre[0]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+            first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
+                                     &gf_motion_error);
+
+            if (gf_motion_error < motion_error && gf_motion_error < this_error)
+              ++second_ref_count;
+
+            // Reset to last frame as reference buffer.
+            xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
+            xd->plane[1].pre[0].buf = first_ref_buf->u_buffer + recon_uvoffset;
+            xd->plane[2].pre[0].buf = first_ref_buf->v_buffer + recon_uvoffset;
+
+            // In accumulating a score for the older reference frame take the
+            // best of the motion predicted score and the intra coded error
+            // (just as is done when accumulating "coded_error" for the last
+            // frame).
+            if (gf_motion_error < this_error)
+              sr_coded_error += gf_motion_error;
+            else
+              sr_coded_error += this_error;
+          } else {
+            sr_coded_error += motion_error;
+          }
+        } else {
+          sr_coded_error += motion_error;
+        }
+
+        // Start by assuming that intra mode is best.
+        best_ref_mv.row = 0;
+        best_ref_mv.col = 0;
+
+#if CONFIG_FP_MB_STATS
+        if (cpi->use_fp_mb_stats) {
+          // intra prediction statistics
+          cpi->twopass.frame_mb_stats_buf[mb_index] = 0;
+          cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_DCINTRA_MASK;
+          cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK;
+          if (this_error > FPMB_ERROR_LARGE_TH) {
+            cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_ERROR_LARGE_MASK;
+          } else if (this_error < FPMB_ERROR_SMALL_TH) {
+            cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_ERROR_SMALL_MASK;
+          }
+        }
+#endif
+
+        if (motion_error <= this_error) {
+          vpx_clear_system_state();
+
+          // Keep a count of cases where the inter and intra were very close
+          // and very low. This helps with scene cut detection for example in
+          // cropped clips with black bars at the sides or top and bottom.
+          if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
+              (this_error < (2 * intrapenalty))) {
+            neutral_count += 1.0;
+            // Also track cases where the intra is not much worse than the inter
+            // and use this in limiting the GF/arf group length.
+          } else if ((this_error > NCOUNT_INTRA_THRESH) &&
+                     (this_error < (NCOUNT_INTRA_FACTOR * motion_error))) {
+            neutral_count +=
+                (double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error);
+          }
+
+          mv.row *= 8;
+          mv.col *= 8;
+          this_error = motion_error;
+          xd->mi[0]->mbmi.mode = NEWMV;
+          xd->mi[0]->mbmi.mv[0].as_mv = mv;
+          xd->mi[0]->mbmi.tx_size = TX_4X4;
+          xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
+          xd->mi[0]->mbmi.ref_frame[1] = NONE;
+          vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
+          vp10_encode_sby_pass1(x, bsize);
+          sum_mvr += mv.row;
+          sum_mvr_abs += abs(mv.row);
+          sum_mvc += mv.col;
+          sum_mvc_abs += abs(mv.col);
+          sum_mvrs += mv.row * mv.row;
+          sum_mvcs += mv.col * mv.col;
+          ++intercount;
+
+          best_ref_mv = mv;
+
+#if CONFIG_FP_MB_STATS
+          if (cpi->use_fp_mb_stats) {
+            // inter prediction statistics
+            cpi->twopass.frame_mb_stats_buf[mb_index] = 0;
+            cpi->twopass.frame_mb_stats_buf[mb_index] &= ~FPMB_DCINTRA_MASK;
+            cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK;
+            if (this_error > FPMB_ERROR_LARGE_TH) {
+              cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                  FPMB_ERROR_LARGE_MASK;
+            } else if (this_error < FPMB_ERROR_SMALL_TH) {
+              cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                  FPMB_ERROR_SMALL_MASK;
+            }
+          }
+#endif
+
+          if (!is_zero_mv(&mv)) {
+            ++mvcount;
+
+#if CONFIG_FP_MB_STATS
+            if (cpi->use_fp_mb_stats) {
+              cpi->twopass.frame_mb_stats_buf[mb_index] &=
+                  ~FPMB_MOTION_ZERO_MASK;
+              // check estimated motion direction
+              // mv is declared as an MV (not int_mv), so access the row/col
+              // fields directly.
+              if (mv.col > 0 && mv.col >= abs(mv.row)) {
+                // right direction
+                cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                    FPMB_MOTION_RIGHT_MASK;
+              } else if (mv.row < 0 && abs(mv.row) >= abs(mv.col)) {
+                // up direction
+                cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                    FPMB_MOTION_UP_MASK;
+              } else if (mv.col < 0 && abs(mv.col) >= abs(mv.row)) {
+                // left direction
+                cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                    FPMB_MOTION_LEFT_MASK;
+              } else {
+                // down direction
+                cpi->twopass.frame_mb_stats_buf[mb_index] |=
+                    FPMB_MOTION_DOWN_MASK;
+              }
+            }
+#endif
+
+            // Non-zero vector, was it different from the last non zero vector?
+            if (!is_equal_mv(&mv, &lastmv)) ++new_mv_count;
+            lastmv = mv;
+
+            // Does the row vector point inwards or outwards?
+            if (mb_row < cm->mb_rows / 2) {
+              if (mv.row > 0)
+                --sum_in_vectors;
+              else if (mv.row < 0)
+                ++sum_in_vectors;
+            } else if (mb_row > cm->mb_rows / 2) {
+              if (mv.row > 0)
+                ++sum_in_vectors;
+              else if (mv.row < 0)
+                --sum_in_vectors;
+            }
+
+            // Does the col vector point inwards or outwards?
+            if (mb_col < cm->mb_cols / 2) {
+              if (mv.col > 0)
+                --sum_in_vectors;
+              else if (mv.col < 0)
+                ++sum_in_vectors;
+            } else if (mb_col > cm->mb_cols / 2) {
+              if (mv.col > 0)
+                ++sum_in_vectors;
+              else if (mv.col < 0)
+                --sum_in_vectors;
+            }
+          }
+        }
+      } else {
+        sr_coded_error += (int64_t)this_error;
+      }
+      coded_error += (int64_t)this_error;
+
+      // Adjust to the next column of MBs.
+      x->plane[0].src.buf += 16;
+      x->plane[1].src.buf += uv_mb_height;
+      x->plane[2].src.buf += uv_mb_height;
+
+      recon_yoffset += 16;
+      recon_uvoffset += uv_mb_height;
+    }
+
+    // Adjust to the next row of MBs.
+    x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
+    x->plane[1].src.buf +=
+        uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
+    x->plane[2].src.buf +=
+        uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
+
+    vpx_clear_system_state();
+  }
+
+  // Clamp the image start to rows / 2. This number of rows is discarded top
+  // and bottom as dead data, so rows / 2 means the frame is effectively
+  // blank.
+  if ((image_data_start_row > cm->mb_rows / 2) ||
+      (image_data_start_row == INVALID_ROW)) {
+    image_data_start_row = cm->mb_rows / 2;
+  }
+  // Exclude any image dead zone
+  if (image_data_start_row > 0) {
+    intra_skip_count =
+        VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2));
+  }
+
+  {
+    FIRSTPASS_STATS fps;
+    // The minimum error here ensures some bit allocation to frames even
+    // in static regions. The allocation per MB declines for larger formats
+    // where the typical "real" energy per MB also falls.
+    // Initial estimate here uses sqrt(mbs) to define the min_err, where the
+    // number of mbs is proportional to the image area.
+    const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
+    const double min_err = 200 * sqrt(num_mbs);
+
+    intra_factor = intra_factor / (double)num_mbs;
+    brightness_factor = brightness_factor / (double)num_mbs;
+    fps.weight = intra_factor * brightness_factor;
+
+    fps.frame = cm->current_video_frame;
+    fps.coded_error = (double)(coded_error >> 8) + min_err;
+    fps.sr_coded_error = (double)(sr_coded_error >> 8) + min_err;
+    fps.intra_error = (double)(intra_error >> 8) + min_err;
+    fps.count = 1.0;
+    fps.pcnt_inter = (double)intercount / num_mbs;
+    fps.pcnt_second_ref = (double)second_ref_count / num_mbs;
+    fps.pcnt_neutral = (double)neutral_count / num_mbs;
+    fps.intra_skip_pct = (double)intra_skip_count / num_mbs;
+    fps.inactive_zone_rows = (double)image_data_start_row;
+    fps.inactive_zone_cols = (double)0;  // TODO(paulwilkins): fix
+
+    if (mvcount > 0) {
+      fps.MVr = (double)sum_mvr / mvcount;
+      fps.mvr_abs = (double)sum_mvr_abs / mvcount;
+      fps.MVc = (double)sum_mvc / mvcount;
+      fps.mvc_abs = (double)sum_mvc_abs / mvcount;
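+      // MVrv/MVcv are the population variances of the row/col components,
+      // i.e. E[v^2] - (E[v])^2, computed from the accumulated sums.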
+      fps.MVrv =
+          ((double)sum_mvrs - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
+      fps.MVcv =
+          ((double)sum_mvcs - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
+      fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2);
+      fps.new_mv_count = new_mv_count;
+      fps.pcnt_motion = (double)mvcount / num_mbs;
+    } else {
+      fps.MVr = 0.0;
+      fps.mvr_abs = 0.0;
+      fps.MVc = 0.0;
+      fps.mvc_abs = 0.0;
+      fps.MVrv = 0.0;
+      fps.MVcv = 0.0;
+      fps.mv_in_out_count = 0.0;
+      fps.new_mv_count = 0.0;
+      fps.pcnt_motion = 0.0;
+    }
+
+    // TODO(paulwilkins):  Handle the case when duration is set to 0, or
+    // something less than the full time between subsequent values of
+    // cpi->source_time_stamp.
+    fps.duration = (double)(source->ts_end - source->ts_start);
+
+    // Don't want to do output stats with a stack variable!
+    twopass->this_frame_stats = fps;
+    output_stats(&twopass->this_frame_stats, cpi->output_pkt_list);
+    accumulate_stats(&twopass->total_stats, &fps);
+
+#if CONFIG_FP_MB_STATS
+    if (cpi->use_fp_mb_stats) {
+      output_fpmb_stats(twopass->frame_mb_stats_buf, cm, cpi->output_pkt_list);
+    }
+#endif
+  }
+
+  // Copy the previous Last Frame back into gf and arf buffers if
+  // the prediction is good enough... but also don't allow it to lag too far.
+  if ((twopass->sr_update_lag > 3) ||
+      ((cm->current_video_frame > 0) &&
+       (twopass->this_frame_stats.pcnt_inter > 0.20) &&
+       ((twopass->this_frame_stats.intra_error /
+         DOUBLE_DIVIDE_CHECK(twopass->this_frame_stats.coded_error)) > 2.0))) {
+    if (gld_yv12 != NULL) {
+#if CONFIG_EXT_REFS
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+                 cm->ref_frame_map[cpi->lst_fb_idxes[LAST_FRAME - LAST_FRAME]]);
+#else
+      ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+                 cm->ref_frame_map[cpi->lst_fb_idx]);
+#endif  // CONFIG_EXT_REFS
+    }
+    twopass->sr_update_lag = 1;
+  } else {
+    ++twopass->sr_update_lag;
+  }
+
+  vpx_extend_frame_borders(new_yv12);
+
+// The frame we just compressed now becomes the last frame.
+#if CONFIG_EXT_REFS
+  ref_cnt_fb(pool->frame_bufs,
+             &cm->ref_frame_map[cpi->lst_fb_idxes[LAST_FRAME - LAST_FRAME]],
+             cm->new_fb_idx);
+#else
+  ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
+             cm->new_fb_idx);
+#endif  // CONFIG_EXT_REFS
+
+  // Special case for the first frame. Copy into the GF buffer as a second
+  // reference.
+  if (cm->current_video_frame == 0 && cpi->gld_fb_idx != INVALID_IDX) {
+#if CONFIG_EXT_REFS
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+               cm->ref_frame_map[cpi->lst_fb_idxes[LAST_FRAME - LAST_FRAME]]);
+#else
+    ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+               cm->ref_frame_map[cpi->lst_fb_idx]);
+#endif  // CONFIG_EXT_REFS
+  }
+
+  // Use this to see what the first pass reconstruction looks like.
+  if (0) {
+    char filename[512];
+    FILE *recon_file;
+    snprintf(filename, sizeof(filename), "enc%04d.yuv",
+             (int)cm->current_video_frame);
+
+    if (cm->current_video_frame == 0)
+      recon_file = fopen(filename, "wb");
+    else
+      recon_file = fopen(filename, "ab");
+
+    (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
+    fclose(recon_file);
+  }
+
+  ++cm->current_video_frame;
+}
+
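+// Illustrative: when err_per_mb equals err_divisor the error term is 1.0 and
+// the returned factor is 1.0 regardless of the power term; error terms away
+// from 1.0 are compressed toward it as Q (and hence the power term) falls.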
+static double calc_correction_factor(double err_per_mb, double err_divisor,
+                                     double pt_low, double pt_high, int q,
+                                     vpx_bit_depth_t bit_depth) {
+  const double error_term = err_per_mb / err_divisor;
+
+  // Adjustment based on actual quantizer to power term.
+  const double power_term =
+      VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
+
+  // Calculate correction factor.
+  if (power_term < 1.0) assert(error_term >= 0.0);
+
+  return fclamp(pow(error_term, power_term), 0.05, 5.0);
+}
+
+#define ERR_DIVISOR 100.0
+static int get_twopass_worst_quality(const VP10_COMP *cpi,
+                                     const double section_err,
+                                     double inactive_zone,
+                                     int section_target_bandwidth,
+                                     double group_weight_factor) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+
+  inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
+
+  if (section_target_bandwidth <= 0) {
+    return rc->worst_quality;  // Highest value allowed
+  } else {
+    const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
+    const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
+    const double av_err_per_mb = section_err / active_mbs;
+    const double speed_term = 1.0 + 0.04 * oxcf->speed;
+    double ediv_size_correction;
+    const int target_norm_bits_per_mb =
+        ((uint64_t)section_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
+    int q;
+
+    // Larger image formats are expected to be a little harder to code
+    // relatively given the same prediction error score. This in part at
+    // least relates to the increased size and hence coding overheads of
+    // motion vectors. Some account of this is made through adjustment of
+    // the error divisor.
+    ediv_size_correction =
+        VPXMAX(0.2, VPXMIN(5.0, get_linear_size_factor(cpi)));
+    if (ediv_size_correction < 1.0)
+      ediv_size_correction = -(1.0 / ediv_size_correction);
+    ediv_size_correction *= 4.0;
+
+    // Try and pick a max Q that will be high enough to encode the
+    // content at the given rate.
+    for (q = rc->best_quality; q < rc->worst_quality; ++q) {
+      const double factor = calc_correction_factor(
+          av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
+          FACTOR_PT_HIGH, q, cpi->common.bit_depth);
+      const int bits_per_mb = vp10_rc_bits_per_mb(
+          INTER_FRAME, q, factor * speed_term * group_weight_factor,
+          cpi->common.bit_depth);
+      if (bits_per_mb <= target_norm_bits_per_mb) break;
+    }
+
+    // Restriction on active max q for constrained quality mode.
+    if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
+    return q;
+  }
+}
+
+static void setup_rf_level_maxq(VP10_COMP *cpi) {
+  int i;
+  RATE_CONTROL *const rc = &cpi->rc;
+  for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) {
+    int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality);
+    rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
+  }
+}
+
+void vp10_init_subsampling(VP10_COMP *cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  const int w = cm->width;
+  const int h = cm->height;
+  int i;
+
+  for (i = 0; i < FRAME_SCALE_STEPS; ++i) {
+    // Note: Frames with odd-sized dimensions may result from this scaling.
+    rc->frame_width[i] = (w * 16) / frame_scale_factor[i];
+    rc->frame_height[i] = (h * 16) / frame_scale_factor[i];
+  }
+
+  setup_rf_level_maxq(cpi);
+}
+
+void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
+                               int *scaled_frame_height) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  *scaled_frame_width = rc->frame_width[rc->frame_size_selector];
+  *scaled_frame_height = rc->frame_height[rc->frame_size_selector];
+}
+
+void vp10_init_second_pass(VP10_COMP *cpi) {
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  TWO_PASS *const twopass = &cpi->twopass;
+  double frame_rate;
+  FIRSTPASS_STATS *stats;
+
+  zero_stats(&twopass->total_stats);
+  zero_stats(&twopass->total_left_stats);
+
+  if (!twopass->stats_in_end) return;
+
+  stats = &twopass->total_stats;
+
+  *stats = *twopass->stats_in_end;
+  twopass->total_left_stats = *stats;
+
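+  // Durations are assumed to be accumulated in 10 MHz ticks (the encoder's
+  // internal timestamp units), hence the 10000000.0 factor below.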
+  frame_rate = 10000000.0 * stats->count / stats->duration;
+  // Each frame can have a different duration, as the frame rate in the source
+  // isn't guaranteed to be constant. The frame rate prior to the first frame
+  // encoded in the second pass is a guess. However, the sum duration is not.
+  // It is calculated based on the actual durations of all frames from the
+  // first pass.
+  vp10_new_framerate(cpi, frame_rate);
+  twopass->bits_left =
+      (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
+
+  // This variable monitors how far behind the second ref update is lagging.
+  twopass->sr_update_lag = 1;
+
+  // Scan the first pass file and calculate a modified total error based upon
+  // the bias/power function used to allocate bits.
+  {
+    const double avg_error =
+        stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
+    const FIRSTPASS_STATS *s = twopass->stats_in;
+    double modified_error_total = 0.0;
+    twopass->modified_error_min =
+        (avg_error * oxcf->two_pass_vbrmin_section) / 100;
+    twopass->modified_error_max =
+        (avg_error * oxcf->two_pass_vbrmax_section) / 100;
+    while (s < twopass->stats_in_end) {
+      modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s);
+      ++s;
+    }
+    twopass->modified_error_left = modified_error_total;
+  }
+
+  // Reset the vbr bits off target counters
+  cpi->rc.vbr_bits_off_target = 0;
+  cpi->rc.vbr_bits_off_target_fast = 0;
+
+  cpi->rc.rate_error_estimate = 0;
+
+  // Static sequence monitor variables.
+  twopass->kf_zeromotion_pct = 100;
+  twopass->last_kfgroup_zeromotion_pct = 100;
+
+  if (oxcf->resize_mode != RESIZE_NONE) {
+    vp10_init_subsampling(cpi);
+  }
+}
+
+#define SR_DIFF_PART 0.0015
+#define MOTION_AMP_PART 0.003
+#define INTRA_PART 0.005
+#define DEFAULT_DECAY_LIMIT 0.75
+#define LOW_SR_DIFF_THRESH 0.1
+#define SR_DIFF_MAX 128.0
+
+static double get_sr_decay_rate(const VP10_COMP *cpi,
+                                const FIRSTPASS_STATS *frame) {
+  const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+                                                             : cpi->common.MBs;
+  double sr_diff = (frame->sr_coded_error - frame->coded_error) / num_mbs;
+  double sr_decay = 1.0;
+  double modified_pct_inter;
+  double modified_pcnt_intra;
+  const double motion_amplitude_factor =
+      frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2);
+
+  modified_pct_inter = frame->pcnt_inter;
+  if ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
+      (double)NCOUNT_FRAME_II_THRESH) {
+    modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral;
+  }
+  modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
+
+  if (sr_diff > LOW_SR_DIFF_THRESH) {
+    sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
+    sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
+               (MOTION_AMP_PART * motion_amplitude_factor) -
+               (INTRA_PART * modified_pcnt_intra);
+  }
+  return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter));
+}
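+// Illustrative example (assumed numbers): sr_diff = 20.0, a motion
+// amplitude factor of 2.0 and modified_pcnt_intra = 10.0 give
+// sr_decay = 1.0 - 0.0015 * 20 - 0.003 * 2 - 0.005 * 10 = 0.914, floored
+// at VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter).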
+
+// This function gives an estimate of the proportion of the frame that is
+// static (zero motion), capped by how quickly prediction quality is
+// believed to be decaying from frame to frame.
+static double get_zero_motion_factor(const VP10_COMP *cpi,
+                                     const FIRSTPASS_STATS *frame) {
+  const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
+  double sr_decay = get_sr_decay_rate(cpi, frame);
+  return VPXMIN(sr_decay, zero_motion_pct);
+}
+
+#define ZM_POWER_FACTOR 0.75
+
+static double get_prediction_decay_rate(const VP10_COMP *cpi,
+                                        const FIRSTPASS_STATS *next_frame) {
+  const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
+  const double zero_motion_factor =
+      (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
+                  ZM_POWER_FACTOR));
+
+  return VPXMAX(zero_motion_factor,
+                (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
+}
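+// For example (assumed numbers): a frame that is 81% zero motion gives
+// zero_motion_factor = 0.95 * pow(0.81, 0.75) ~= 0.81, so a near-static
+// frame keeps the returned decay rate close to 1.0 whatever sr_decay_rate
+// says.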
+
+// Function to test for a condition where a complex transition is followed
+// by a static section. For example in slide shows where there is a fade
+// between slides. This is to help with more optimal kf and gf positioning.
+static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+                                      int still_interval,
+                                      double loop_decay_rate,
+                                      double last_decay_rate) {
+  TWO_PASS *const twopass = &cpi->twopass;
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  // Break clause to detect very still sections after motion
+  // For example a static image after a fade or other transition
+  // instead of a clean scene cut.
+  if (frame_interval > rc->min_gf_interval && loop_decay_rate >= 0.999 &&
+      last_decay_rate < 0.9) {
+    int j;
+
+    // Look ahead a few frames to see if static condition persists...
+    for (j = 0; j < still_interval; ++j) {
+      const FIRSTPASS_STATS *stats = &twopass->stats_in[j];
+      if (stats >= twopass->stats_in_end) break;
+
+      if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
+    }
+
+    // Only if it does do we signal a transition to still.
+    return j == still_interval;
+  }
+
+  return 0;
+}
+
+// This function detects a flash through the high relative pcnt_second_ref
+// score in the frame following a flash frame. The offset passed in should
+// reflect this.
+static int detect_flash(const TWO_PASS *twopass, int offset) {
+  const FIRSTPASS_STATS *const next_frame = read_frame_stats(twopass, offset);
+
+  // What we are looking for here is a situation where there is a
+  // brief break in prediction (such as a flash) but subsequent frames
+  // are reasonably well predicted by an earlier (pre flash) frame.
+  // The recovery after a flash is indicated by a high pcnt_second_ref
+  // compared to pcnt_inter.
+  return next_frame != NULL &&
+         next_frame->pcnt_second_ref > next_frame->pcnt_inter &&
+         next_frame->pcnt_second_ref >= 0.5;
+}
+
+// Update the motion related elements to the GF arf boost calculation.
+static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
+                                          double *mv_in_out,
+                                          double *mv_in_out_accumulator,
+                                          double *abs_mv_in_out_accumulator,
+                                          double *mv_ratio_accumulator) {
+  const double pct = stats->pcnt_motion;
+
+  // Accumulate Motion In/Out of frame stats.
+  *mv_in_out = stats->mv_in_out_count * pct;
+  *mv_in_out_accumulator += *mv_in_out;
+  *abs_mv_in_out_accumulator += fabs(*mv_in_out);
+
+  // Accumulate a measure of how uniform (or conversely how random) the motion
+  // field is (a ratio of abs(mv) / mv).
+  if (pct > 0.05) {
+    const double mvr_ratio =
+        fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
+    const double mvc_ratio =
+        fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
+
+    *mv_ratio_accumulator +=
+        pct * (mvr_ratio < stats->mvr_abs ? mvr_ratio : stats->mvr_abs);
+    *mv_ratio_accumulator +=
+        pct * (mvc_ratio < stats->mvc_abs ? mvc_ratio : stats->mvc_abs);
+  }
+}
+
+#define BASELINE_ERR_PER_MB 1000.0
+static double calc_frame_boost(VP10_COMP *cpi,
+                               const FIRSTPASS_STATS *this_frame,
+                               double this_frame_mv_in_out, double max_boost) {
+  double frame_boost;
+  const double lq = vp10_convert_qindex_to_q(
+      cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
+  const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
+  int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+                                                       : cpi->common.MBs;
+
+  // Correct for any inactive region in the image
+  num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
+
+  // Underlying boost factor is based on inter error ratio.
+  frame_boost = (BASELINE_ERR_PER_MB * num_mbs) /
+                DOUBLE_DIVIDE_CHECK(this_frame->coded_error);
+  frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction;
+
+  // Increase boost for frames where new data is coming into the frame
+  // (e.g. zoom out). Slightly reduce boost if there is a net balance of
+  // motion out of the frame (zoom in). The range for this_frame_mv_in_out
+  // is -1.0 to +1.0.
+  if (this_frame_mv_in_out > 0.0)
+    frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
+  // In the extreme case the boost is halved.
+  else
+    frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
+
+  return VPXMIN(frame_boost, max_boost * boost_q_correction);
+}
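+// Worked example (assumed numbers): an average inter-frame Q of lq = 40
+// gives boost_q_correction = VPXMIN(0.5 + 40 * 0.015, 1.5) = 1.1. Net
+// motion into the frame of +0.5 then scales the boost by
+// (1 + 0.5 * 2.0) = 2.0, while the extreme value -1.0 scales it by
+// (1 - 1.0 / 2.0) = 0.5, i.e. the boost is halved.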
+
+static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
+                          int b_frames, int *f_boost, int *b_boost) {
+  TWO_PASS *const twopass = &cpi->twopass;
+  int i;
+  double boost_score = 0.0;
+  double mv_ratio_accumulator = 0.0;
+  double decay_accumulator = 1.0;
+  double this_frame_mv_in_out = 0.0;
+  double mv_in_out_accumulator = 0.0;
+  double abs_mv_in_out_accumulator = 0.0;
+  int arf_boost;
+  int flash_detected = 0;
+
+  // Search forward from the proposed arf/next gf position.
+  for (i = 0; i < f_frames; ++i) {
+    const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
+    if (this_frame == NULL) break;
+
+    // Update the motion related elements to the boost calculation.
+    accumulate_frame_motion_stats(
+        this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // We want to discount the flash frame itself and the recovery
+    // frame that follows as both will have poor scores.
+    flash_detected = detect_flash(twopass, i + offset) ||
+                     detect_flash(twopass, i + offset + 1);
+
+    // Accumulate the effect of prediction quality decay.
+    if (!flash_detected) {
+      decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
+      decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+                              ? MIN_DECAY_FACTOR
+                              : decay_accumulator;
+    }
+
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
+  }
+
+  *f_boost = (int)boost_score;
+
+  // Reset for backward looking loop.
+  boost_score = 0.0;
+  mv_ratio_accumulator = 0.0;
+  decay_accumulator = 1.0;
+  this_frame_mv_in_out = 0.0;
+  mv_in_out_accumulator = 0.0;
+  abs_mv_in_out_accumulator = 0.0;
+
+  // Search backward towards last gf position.
+  for (i = -1; i >= -b_frames; --i) {
+    const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
+    if (this_frame == NULL) break;
+
+    // Update the motion related elements to the boost calculation.
+    accumulate_frame_motion_stats(
+        this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // We want to discount the flash frame itself and the recovery
+    // frame that follows as both will have poor scores.
+    flash_detected = detect_flash(twopass, i + offset) ||
+                     detect_flash(twopass, i + offset + 1);
+
+    // Cumulative effect of prediction quality decay.
+    if (!flash_detected) {
+      decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
+      decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
+                              ? MIN_DECAY_FACTOR
+                              : decay_accumulator;
+    }
+
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
+  }
+  *b_boost = (int)boost_score;
+
+  arf_boost = (*f_boost + *b_boost);
+  if (arf_boost < ((b_frames + f_frames) * 20))
+    arf_boost = ((b_frames + f_frames) * 20);
+  arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST);
+
+  return arf_boost;
+}
+
+// Calculate a section intra ratio used in setting max loop filter.
+static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
+                                         const FIRSTPASS_STATS *end,
+                                         int section_length) {
+  const FIRSTPASS_STATS *s = begin;
+  double intra_error = 0.0;
+  double coded_error = 0.0;
+  int i = 0;
+
+  while (s < end && i < section_length) {
+    intra_error += s->intra_error;
+    coded_error += s->coded_error;
+    ++s;
+    ++i;
+  }
+
+  return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error));
+}
+
+// Calculate the total bits to allocate in this GF/ARF group.
+static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
+                                             double gf_group_err) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const TWO_PASS *const twopass = &cpi->twopass;
+  const int max_bits = frame_max_bits(rc, &cpi->oxcf);
+  int64_t total_group_bits;
+
+  // Calculate the bits to be allocated to the group as a whole.
+  if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) {
+    total_group_bits = (int64_t)(twopass->kf_group_bits *
+                                 (gf_group_err / twopass->kf_group_error_left));
+  } else {
+    total_group_bits = 0;
+  }
+
+  // Clamp odd edge cases.
+  total_group_bits =
+      (total_group_bits < 0) ? 0 : (total_group_bits > twopass->kf_group_bits)
+                                       ? twopass->kf_group_bits
+                                       : total_group_bits;
+
+  // Clip based on user supplied data rate variability limit.
+  if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval)
+    total_group_bits = (int64_t)max_bits * rc->baseline_gf_interval;
+
+  return total_group_bits;
+}
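+// For example (assumed numbers): kf_group_bits = 1000000 with this GF group
+// contributing gf_group_err = 20.0 of kf_group_error_left = 100.0 gives
+// total_group_bits = 1000000 * (20 / 100) = 200000, before the clamps
+// above.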
+
+// Calculate the number of extra bits to assign to boosted frames in a group.
+static int calculate_boost_bits(int frame_count, int boost,
+                                int64_t total_group_bits) {
+  int allocation_chunks;
+
+  // Return 0 for invalid inputs (could arise e.g. through rounding errors).
+  if (!boost || (total_group_bits <= 0) || (frame_count <= 0)) return 0;
+
+  allocation_chunks = (frame_count * 100) + boost;
+
+  // Prevent overflow.
+  if (boost > 1023) {
+    int divisor = boost >> 10;
+    boost /= divisor;
+    allocation_chunks /= divisor;
+  }
+
+  // Calculate the number of extra bits for use in the boosted frame or frames.
+  return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks),
+                0);
+}
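+// Worked example (assumed numbers): frame_count = 8, boost = 300 and
+// total_group_bits = 800000 give allocation_chunks = 8 * 100 + 300 = 1100,
+// so the boosted frame(s) receive 300 * 800000 / 1100 ~= 218181 extra bits.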
+
+#if !CONFIG_EXT_REFS
+// Current limit on maximum number of active arfs in a GF/ARF group.
+#define MAX_ACTIVE_ARFS 2
+#define ARF_SLOT1 2
+#define ARF_SLOT2 3
+// This function indirects the choice of buffers for arfs.
+// At the moment the values are fixed but this may change as part of
+// the integration process with other codec features that swap buffers around.
+static void get_arf_buffer_indices(unsigned char *arf_buffer_indices) {
+  arf_buffer_indices[0] = ARF_SLOT1;
+  arf_buffer_indices[1] = ARF_SLOT2;
+}
+#endif
+
+static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
+                                   double group_error, int gf_arf_bits) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  TWO_PASS *const twopass = &cpi->twopass;
+  GF_GROUP *const gf_group = &twopass->gf_group;
+  FIRSTPASS_STATS frame_stats;
+  int i;
+  int frame_index = 0;
+  int target_frame_size;
+  int key_frame;
+  const int max_bits = frame_max_bits(&cpi->rc, &cpi->oxcf);
+  int64_t total_group_bits = gf_group_bits;
+  double modified_err = 0.0;
+  double err_fraction;
+  int mid_boost_bits = 0;
+#if !CONFIG_EXT_REFS
+  int mid_frame_idx;
+  unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS];
+#endif
+#if CONFIG_EXT_REFS
+  // The use of bi-predictive frames is only enabled when the following 3
+  // conditions are met:
+  // (1) Alt-ref is enabled;
+  // (2) The bi-predictive group interval is at least 2; and
+  // (3) The bi-predictive group interval is strictly smaller than the
+  //     golden group interval.
+  const int is_bipred_enabled =
+      rc->source_alt_ref_pending && rc->bipred_group_interval &&
+      rc->bipred_group_interval <=
+          (rc->baseline_gf_interval - rc->source_alt_ref_pending);
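+  // E.g. (assumed numbers): with an ARF pending (source_alt_ref_pending = 1),
+  // baseline_gf_interval = 8 and bipred_group_interval = 2, bi-prediction is
+  // enabled since 2 <= 8 - 1 = 7.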
+  int bipred_group_end = 0;
+  int bipred_frame_index = 0;
+  int arf_pos[MAX_EXT_ARFS + 1];
+  const unsigned char ext_arf_interval =
+      (unsigned char)(rc->baseline_gf_interval / (cpi->num_extra_arfs + 1) - 1);
+  int which_arf = cpi->num_extra_arfs;
+  int subgroup_interval[MAX_EXT_ARFS + 1];
+  int ext_arf_boost[MAX_EXT_ARFS];
+  int is_sg_bipred_enabled = is_bipred_enabled;
+  int accumulative_subgroup_interval = 0;
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+  vp10_zero_array(ext_arf_boost, MAX_EXT_ARFS);
+#endif
+
+  key_frame = cpi->common.frame_type == KEY_FRAME;
+
+#if !CONFIG_EXT_REFS
+  get_arf_buffer_indices(arf_buffer_indices);
+#endif
+
+  // For key frames the frame target rate is already set and it
+  // is also the golden frame.
+  if (!key_frame) {
+    if (rc->source_alt_ref_active) {
+      gf_group->update_type[frame_index] = OVERLAY_UPDATE;
+      gf_group->rf_level[frame_index] = INTER_NORMAL;
+      gf_group->bit_allocation[frame_index] = 0;
+    } else {
+      gf_group->update_type[frame_index] = GF_UPDATE;
+      gf_group->rf_level[frame_index] = GF_ARF_STD;
+      gf_group->bit_allocation[frame_index] = gf_arf_bits;
+    }
+#if CONFIG_EXT_REFS
+    gf_group->arf_update_idx[frame_index] = 0;
+    gf_group->arf_ref_idx[frame_index] = 0;
+#else
+    gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
+    gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
+#endif
+    // Step over the golden frame / overlay frame
+    if (EOF == input_stats(twopass, &frame_stats)) return;
+  }
+
+#if CONFIG_EXT_REFS
+  gf_group->bidir_pred_enabled[frame_index] = 0;
+  gf_group->brf_src_offset[frame_index] = 0;
+#endif  // CONFIG_EXT_REFS
+
+  // Deduct the boost bits for arf (or gf if it is not a key frame)
+  // from the group total.
+  if (rc->source_alt_ref_pending || !key_frame) total_group_bits -= gf_arf_bits;
+
+  frame_index++;
+
+#if CONFIG_EXT_REFS
+  bipred_frame_index++;
+#endif  // CONFIG_EXT_REFS
+
+  // Store the bits to spend on the ARF if there is one.
+  if (rc->source_alt_ref_pending) {
+    gf_group->update_type[frame_index] = ARF_UPDATE;
+    gf_group->rf_level[frame_index] = GF_ARF_STD;
+    gf_group->bit_allocation[frame_index] = gf_arf_bits;
+
+    gf_group->arf_src_offset[frame_index] =
+        (unsigned char)(rc->baseline_gf_interval - 1);
+
+#if CONFIG_EXT_REFS
+    gf_group->arf_update_idx[frame_index] = 0;
+    gf_group->arf_ref_idx[frame_index] = 0;
+#else
+    gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
+    gf_group->arf_ref_idx[frame_index] =
+        arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
+                           rc->source_alt_ref_active];
+#endif  // CONFIG_EXT_REFS
+#if CONFIG_EXT_REFS
+    gf_group->bidir_pred_enabled[frame_index] = 0;
+    gf_group->brf_src_offset[frame_index] = 0;
+// NOTE: "bidir_pred_frame_index" stays unchanged for ARF_UPDATE frames.
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+    // Work out the ARFs' positions in this gf group
+    // NOTE(weitinglin): ALT_REFs are indexed inversely, but coded in display
+    // order (except for the original ARF). In the example of three ALT_REFs,
+    // we index them as: KEY ----- ALT2 ----- ALT1 ----- ALT0
+    // but code them in the following order:
+    // KEY-ALT0-ALT2 ----- OVERLAY2-ALT1 ----- OVERLAY1 ----- OVERLAY0
+    arf_pos[0] =
+        frame_index + cpi->num_extra_arfs + gf_group->arf_src_offset[1] + 1;
+    for (i = 0; i < cpi->num_extra_arfs; ++i) {
+      arf_pos[i + 1] =
+          frame_index + (cpi->num_extra_arfs - i) * (ext_arf_interval + 2);
+      subgroup_interval[i] = arf_pos[i] - arf_pos[i + 1] - (i == 0 ? 1 : 2);
+    }
+    subgroup_interval[cpi->num_extra_arfs] = arf_pos[cpi->num_extra_arfs] -
+                                             frame_index -
+                                             (cpi->num_extra_arfs == 0 ? 1 : 2);
+#endif  // CONFIG_EXT_REFS
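+    // Illustrative trace of the above (assumed numbers):
+    // baseline_gf_interval = 12 and num_extra_arfs = 2 give
+    // ext_arf_interval = 12 / 3 - 1 = 3. With frame_index = 1 and
+    // arf_src_offset[1] = 11, arf_pos[] = { 15, 11, 6 } and each
+    // subgroup_interval[] entry works out to 3.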
+
+    ++frame_index;
+
+#if CONFIG_EXT_REFS
+    // Insert an extra ARF
+    if (cpi->num_extra_arfs) {
+      gf_group->update_type[frame_index] = ARF_UPDATE;
+      // Note (weitinglin): GF_ARF_LOW is also used as an identifier
+      //                    for internal ALT_REF's:
+      gf_group->rf_level[frame_index] = GF_ARF_LOW;
+      gf_group->arf_src_offset[frame_index] = ext_arf_interval;
+      gf_group->arf_update_idx[frame_index] = which_arf;
+      gf_group->arf_ref_idx[frame_index] = 0;
+      ++frame_index;
+    }
+    accumulative_subgroup_interval += subgroup_interval[cpi->num_extra_arfs];
+#else
+    if (cpi->multi_arf_enabled) {
+      // Set aside a slot for a level 1 arf.
+      gf_group->update_type[frame_index] = ARF_UPDATE;
+      gf_group->rf_level[frame_index] = GF_ARF_LOW;
+      gf_group->arf_src_offset[frame_index] =
+          (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
+      gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1];
+      gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
+      ++frame_index;
+    }
+#endif  // CONFIG_EXT_REFS
+  }
+
+#if !CONFIG_EXT_REFS
+  // Define middle frame
+  mid_frame_idx = frame_index + (rc->baseline_gf_interval >> 1) - 1;
+#endif
+
+  // Allocate bits to the other frames in the group.
+  for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) {
+#if !CONFIG_EXT_REFS
+    int arf_idx = 0;
+#endif
+    if (EOF == input_stats(twopass, &frame_stats)) break;
+
+    modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats);
+
+    if (group_error > 0)
+      err_fraction = modified_err / DOUBLE_DIVIDE_CHECK(group_error);
+    else
+      err_fraction = 0.0;
+
+    target_frame_size = (int)((double)total_group_bits * err_fraction);
+
+    if (rc->source_alt_ref_pending && cpi->multi_arf_enabled) {
+      mid_boost_bits += (target_frame_size >> 4);
+      target_frame_size -= (target_frame_size >> 4);
+#if !CONFIG_EXT_REFS
+      if (frame_index <= mid_frame_idx) arf_idx = 1;
+#endif
+    }
+#if CONFIG_EXT_REFS
+    gf_group->arf_update_idx[frame_index] = which_arf;
+    gf_group->arf_ref_idx[frame_index] = which_arf;
+#else
+    gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx];
+    gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
+#endif  // CONFIG_EXT_REFS
+    target_frame_size =
+        clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
+
+#if CONFIG_EXT_REFS
+    // If we are going to have ARFs, check if we can have BWDREF in this
+    // subgroup.
+    if (rc->source_alt_ref_pending) {
+      is_sg_bipred_enabled =
+          is_bipred_enabled &&
+          (subgroup_interval[which_arf] > rc->bipred_group_interval);
+    }
+    // NOTE: BIDIR_PRED is only enabled when the length of the bi-predictive
+    //       frame group interval is strictly smaller than that of the GOLDEN
+    //       FRAME group interval.
+    // TODO(zoeliu): Currently BIDIR_PRED is only enabled when alt-ref is on.
+    if (is_sg_bipred_enabled && !bipred_group_end) {
+      const int cur_brf_src_offset = rc->bipred_group_interval - 1;
+
+      // --- BRF_UPDATE ---
+      if (bipred_frame_index == 1) {
+        gf_group->update_type[frame_index] = BRF_UPDATE;
+        gf_group->bidir_pred_enabled[frame_index] = 1;
+        gf_group->brf_src_offset[frame_index] = cur_brf_src_offset;
+        // --- LAST_BIPRED_UPDATE ---
+      } else if (bipred_frame_index == rc->bipred_group_interval) {
+        gf_group->update_type[frame_index] = LAST_BIPRED_UPDATE;
+        gf_group->bidir_pred_enabled[frame_index] = 1;
+        gf_group->brf_src_offset[frame_index] = 0;
+        // Reset the bi-predictive frame index.
+        bipred_frame_index = 0;
+        // --- BIPRED_UPDATE ---
+      } else {
+        gf_group->update_type[frame_index] = BIPRED_UPDATE;
+        gf_group->bidir_pred_enabled[frame_index] = 1;
+        gf_group->brf_src_offset[frame_index] = 0;
+      }
+
+      bipred_frame_index++;
+      // Check whether the next bi-predictive frame group would entirely be
+      // included within the current golden frame group.
+      // In addition, we need to avoid coding a BRF right before an ARF.
+      if (bipred_frame_index == 1 &&
+          (i + 2 + cur_brf_src_offset) >= accumulative_subgroup_interval) {
+        bipred_group_end = 1;
+      }
+    } else {
+#endif  // CONFIG_EXT_REFS
+      gf_group->update_type[frame_index] = LF_UPDATE;
+#if CONFIG_EXT_REFS
+      gf_group->bidir_pred_enabled[frame_index] = 0;
+      gf_group->brf_src_offset[frame_index] = 0;
+    }
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+    if (gf_group->update_type[frame_index] == BRF_UPDATE) {
+      // Boost up the allocated bits on BWDREF_FRAME
+      gf_group->rf_level[frame_index] = INTER_HIGH;
+      gf_group->bit_allocation[frame_index] =
+          target_frame_size + (target_frame_size >> 2);
+    } else if (gf_group->update_type[frame_index] == LAST_BIPRED_UPDATE) {
+      // Press down the allocated bits on LAST_BIPRED_UPDATE frames
+      gf_group->rf_level[frame_index] = INTER_LOW;
+      gf_group->bit_allocation[frame_index] =
+          target_frame_size - (target_frame_size >> 1);
+    } else if (gf_group->update_type[frame_index] == BIPRED_UPDATE) {
+      // TODO(zoeliu): To investigate whether the allocated bits on
+      // BIPRED_UPDATE frames need to be further adjusted.
+      gf_group->rf_level[frame_index] = INTER_NORMAL;
+      gf_group->bit_allocation[frame_index] = target_frame_size;
+    } else {
+#endif  // CONFIG_EXT_REFS
+      gf_group->rf_level[frame_index] = INTER_NORMAL;
+      gf_group->bit_allocation[frame_index] = target_frame_size;
+#if CONFIG_EXT_REFS
+    }
+#endif  // CONFIG_EXT_REFS
+
+    ++frame_index;
+#if CONFIG_EXT_REFS
+    // Check if we need to update the ARF
+    if (cpi->num_extra_arfs && frame_index > arf_pos[which_arf]) {
+      --which_arf;
+      accumulative_subgroup_interval += subgroup_interval[which_arf] + 1;
+      // Entering a new subgroup; reset the bipred_group_end flag.
+      bipred_group_end = 0;
+      // Insert another extra ARF after the overlay frame
+      if (which_arf) {
+        gf_group->update_type[frame_index] = ARF_UPDATE;
+        gf_group->rf_level[frame_index] = GF_ARF_LOW;
+        gf_group->arf_src_offset[frame_index] = ext_arf_interval;
+        gf_group->arf_update_idx[frame_index] = which_arf;
+        gf_group->arf_ref_idx[frame_index] = 0;
+        ++frame_index;
+      }
+    }
+#endif
+  }
+
+// Note:
+// We need to configure the frame at the end of the sequence + 1 that will be
+// the start frame for the next group. Otherwise prior to the call to
+// vp10_rc_get_second_pass_params() the data will be undefined.
+#if CONFIG_EXT_REFS
+  gf_group->arf_update_idx[frame_index] = 0;
+  gf_group->arf_ref_idx[frame_index] = 0;
+#else
+  gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
+  gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
+#endif
+  if (rc->source_alt_ref_pending) {
+    gf_group->update_type[frame_index] = OVERLAY_UPDATE;
+    gf_group->rf_level[frame_index] = INTER_NORMAL;
+
+#if CONFIG_EXT_REFS
+    if (cpi->num_extra_arfs) {
+      for (i = cpi->num_extra_arfs; i > 0; --i) {
+        int arf_pos_in_gf = (i == cpi->num_extra_arfs ? 2 : arf_pos[i + 1] + 1);
+        gf_group->bit_allocation[arf_pos_in_gf] =
+            gf_group->bit_allocation[arf_pos[i]];
+        gf_group->update_type[arf_pos[i]] = INTNL_OVERLAY_UPDATE;
+        gf_group->bit_allocation[arf_pos[i]] = 0;
+        gf_group->rf_level[arf_pos[i]] = INTER_LOW;
+      }
+    }
+#endif
+#if !CONFIG_EXT_REFS
+    // Final setup for second arf and its overlay.
+    if (cpi->multi_arf_enabled) {
+      gf_group->bit_allocation[2] =
+          gf_group->bit_allocation[mid_frame_idx] + mid_boost_bits;
+      gf_group->update_type[mid_frame_idx] = OVERLAY_UPDATE;
+      gf_group->bit_allocation[mid_frame_idx] = 0;
+    }
+#endif
+  } else {
+    gf_group->update_type[frame_index] = GF_UPDATE;
+    gf_group->rf_level[frame_index] = GF_ARF_STD;
+  }
+#if CONFIG_EXT_REFS
+  gf_group->bidir_pred_enabled[frame_index] = 0;
+  gf_group->brf_src_offset[frame_index] = 0;
+#endif  // CONFIG_EXT_REFS
+
+  // Note whether multi-arf was enabled this group for next time.
+  cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled;
+}
+
+// Analyse and define a gf/arf group.
+static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  TWO_PASS *const twopass = &cpi->twopass;
+  FIRSTPASS_STATS next_frame;
+  const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
+  int i;
+
+  double boost_score = 0.0;
+  double old_boost_score = 0.0;
+  double gf_group_err = 0.0;
+#if GROUP_ADAPTIVE_MAXQ
+  double gf_group_raw_error = 0.0;
+#endif
+  double gf_group_skip_pct = 0.0;
+  double gf_group_inactive_zone_rows = 0.0;
+  double gf_first_frame_err = 0.0;
+  double mod_frame_err = 0.0;
+
+  double mv_ratio_accumulator = 0.0;
+  double decay_accumulator = 1.0;
+  double zero_motion_accumulator = 1.0;
+
+  double loop_decay_rate = 1.00;
+  double last_loop_decay_rate = 1.00;
+
+  double this_frame_mv_in_out = 0.0;
+  double mv_in_out_accumulator = 0.0;
+  double abs_mv_in_out_accumulator = 0.0;
+  double mv_ratio_accumulator_thresh;
+  unsigned int allow_alt_ref = is_altref_enabled(cpi);
+
+  int f_boost = 0;
+  int b_boost = 0;
+  int flash_detected;
+  int active_max_gf_interval;
+  int active_min_gf_interval;
+  int64_t gf_group_bits;
+  double gf_group_error_left;
+  int gf_arf_bits;
+  const int is_key_frame = frame_is_intra_only(cm);
+  const int arf_active_or_kf = is_key_frame || rc->source_alt_ref_active;
+
+  // Reset the GF group data structures unless this is a key
+  // frame in which case it will already have been done.
+  if (is_key_frame == 0) {
+    vp10_zero(twopass->gf_group);
+  }
+
+  vpx_clear_system_state();
+  vp10_zero(next_frame);
+
+  // Load stats for the current frame.
+  mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
+
+  // Note the error of the frame at the start of the group. This will be
+  // the GF frame error if we code a normal gf.
+  gf_first_frame_err = mod_frame_err;
+
+  // If this is a key frame or the overlay from a previous arf then
+  // the error score / cost of this frame has already been accounted for.
+  if (arf_active_or_kf) {
+    gf_group_err -= gf_first_frame_err;
+#if GROUP_ADAPTIVE_MAXQ
+    gf_group_raw_error -= this_frame->coded_error;
+#endif
+    gf_group_skip_pct -= this_frame->intra_skip_pct;
+    gf_group_inactive_zone_rows -= this_frame->inactive_zone_rows;
+  }
+
+  // Motion breakout threshold for loop below depends on image size.
+  mv_ratio_accumulator_thresh =
+      (cpi->initial_height + cpi->initial_width) / 4.0;
+
+  // Set a maximum and minimum interval for the GF group.
+  // If the image appears almost completely static we can extend beyond this.
+  {
+    int int_max_q = (int)(vp10_convert_qindex_to_q(
+        twopass->active_worst_quality, cpi->common.bit_depth));
+    int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
+                                                 cpi->common.bit_depth));
+
+    active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
+    if (active_min_gf_interval > rc->max_gf_interval)
+      active_min_gf_interval = rc->max_gf_interval;
+
+    if (cpi->multi_arf_allowed) {
+      active_max_gf_interval = rc->max_gf_interval;
+    } else {
+      // The value chosen depends on the active Q range. At low Q we have
+      // bits to spare and are better with a smaller interval and smaller boost.
+      // At high Q when there are few bits to spare we are better with a longer
+      // interval to spread the cost of the GF.
+      active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6));
+
+      // We have: active_min_gf_interval <= rc->max_gf_interval
+      if (active_max_gf_interval < active_min_gf_interval)
+        active_max_gf_interval = active_min_gf_interval;
+      else if (active_max_gf_interval > rc->max_gf_interval)
+        active_max_gf_interval = rc->max_gf_interval;
+    }
+  }
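+  // For example (assumed numbers): min_gf_interval = 4 with int_max_q large
+  // enough gives active_min_gf_interval = 4 + 2 = 6, and int_lbq = 30 gives
+  // active_max_gf_interval = 12 + VPXMIN(4, 30 / 6) = 16, subject to the
+  // clamps above.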
+
+  i = 0;
+  while (i < rc->static_scene_max_gf_interval && i < rc->frames_to_key) {
+    ++i;
+
+    // Accumulate error score of frames in this gf group.
+    mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
+    gf_group_err += mod_frame_err;
+#if GROUP_ADAPTIVE_MAXQ
+    gf_group_raw_error += this_frame->coded_error;
+#endif
+    gf_group_skip_pct += this_frame->intra_skip_pct;
+    gf_group_inactive_zone_rows += this_frame->inactive_zone_rows;
+
+    if (EOF == input_stats(twopass, &next_frame)) break;
+
+    // Test for the case where there is a brief flash but prediction
+    // quality relative to an earlier frame is then restored.
+    flash_detected = detect_flash(twopass, 0);
+
+    // Update the motion related elements to the boost calculation.
+    accumulate_frame_motion_stats(
+        &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+        &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
+
+    // Accumulate the effect of prediction quality decay.
+    if (!flash_detected) {
+      last_loop_decay_rate = loop_decay_rate;
+      loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame);
+
+      decay_accumulator = decay_accumulator * loop_decay_rate;
+
+      // Monitor for static sections.
+      zero_motion_accumulator = VPXMIN(
+          zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
+
+      // Break clause to detect very still sections after motion. For example,
+      // a static image after a fade or other transition.
+      if (detect_transition_to_still(cpi, i, 5, loop_decay_rate,
+                                     last_loop_decay_rate)) {
+        allow_alt_ref = 0;
+        break;
+      }
+    }
+
+    // Calculate a boost number for this frame.
+    boost_score +=
+        decay_accumulator *
+        calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out, GF_MAX_BOOST);
+
+    // Break out conditions.
+    if (
+        // Break at active_max_gf_interval unless almost totally static.
+        (i >= (active_max_gf_interval + arf_active_or_kf) &&
+         zero_motion_accumulator < 0.995) ||
+        (
+            // Don't break out with a very short interval.
+            (i >= active_min_gf_interval + arf_active_or_kf) &&
+            (!flash_detected) &&
+            ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+             (abs_mv_in_out_accumulator > 3.0) ||
+             (mv_in_out_accumulator < -2.0) ||
+             ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
+      boost_score = old_boost_score;
+      break;
+    }
+
+    *this_frame = next_frame;
+    old_boost_score = boost_score;
+  }
+
+  twopass->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0);
+
+  // Was the group length constrained by the requirement for a new KF?
+  rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
+
+  // Should we use the alternate reference frame?
+  if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) &&
+      (i >= rc->min_gf_interval)) {
+    // Calculate the boost for alt ref.
+    rc->gfu_boost =
+        calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
+    rc->source_alt_ref_pending = 1;
+
+    // Test to see if multi arf is appropriate.
+    cpi->multi_arf_enabled =
+        (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
+         (zero_motion_accumulator < 0.995))
+            ? 1
+            : 0;
+  } else {
+    rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
+    rc->source_alt_ref_pending = 0;
+  }
+
+  // Set the interval until the next gf.
+  rc->baseline_gf_interval = i - (is_key_frame || rc->source_alt_ref_pending);
+
+#if CONFIG_EXT_REFS
+  // Compute how many extra alt_refs we can have
+  cpi->num_extra_arfs = get_number_of_extra_arfs(rc->baseline_gf_interval,
+                                                 rc->source_alt_ref_pending);
+  // Currently a maximum of two extra ARFs is allowed.
+  assert(cpi->num_extra_arfs <= 2);
+#endif
+
+  rc->frames_till_gf_update_due = rc->baseline_gf_interval;
+
+#if CONFIG_EXT_REFS
+  rc->bipred_group_interval = BFG_INTERVAL;
+  // The minimum bi-predictive frame group interval is 2.
+  if (rc->bipred_group_interval < 2) rc->bipred_group_interval = 0;
+#endif  // CONFIG_EXT_REFS
+
+  // Reset the file position.
+  reset_fpf_position(twopass, start_pos);
+
+  // Calculate the bits to be allocated to the gf/arf group as a whole
+  gf_group_bits = calculate_total_gf_group_bits(cpi, gf_group_err);
+
+#if GROUP_ADAPTIVE_MAXQ
+  // Calculate an estimate of the maxq needed for the group.
+  // We are more aggressive about correcting for sections
+  // where there could be significant overshoot than for easier
+  // sections where we do not wish to risk creating an overshoot
+  // of the allocated bit budget.
+  if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
+    const int vbr_group_bits_per_frame =
+        (int)(gf_group_bits / rc->baseline_gf_interval);
+    const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
+    const double group_av_skip_pct =
+        gf_group_skip_pct / rc->baseline_gf_interval;
+    const double group_av_inactive_zone =
+        ((gf_group_inactive_zone_rows * 2) /
+         (rc->baseline_gf_interval * (double)cm->mb_rows));
+
+    int tmp_q;
+    // rc factor is a weight factor that corrects for local rate control drift.
+    double rc_factor = 1.0;
+    if (rc->rate_error_estimate > 0) {
+      rc_factor = VPXMAX(RC_FACTOR_MIN,
+                         (double)(100 - rc->rate_error_estimate) / 100.0);
+    } else {
+      rc_factor = VPXMIN(RC_FACTOR_MAX,
+                         (double)(100 - rc->rate_error_estimate) / 100.0);
+    }
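+    // E.g. (assumed numbers): rate_error_estimate = 20 gives
+    // rc_factor = VPXMAX(RC_FACTOR_MIN, 0.8); rate_error_estimate = -20
+    // gives rc_factor = VPXMIN(RC_FACTOR_MAX, 1.2).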
+    tmp_q = get_twopass_worst_quality(
+        cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
+        vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor);
+    twopass->active_worst_quality =
+        VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
+  }
+#endif
+
+  // Calculate the extra bits to be used for boosted frame(s)
+  gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, rc->gfu_boost,
+                                     gf_group_bits);
+
+  // Adjust KF group bits and error remaining.
+  twopass->kf_group_error_left -= (int64_t)gf_group_err;
+
+  // If this is an arf update we want to remove the score for the overlay
+  // frame at the end which will usually be very cheap to code.
+  // The overlay frame has already, in effect, been coded so we want to spread
+  // the remaining bits among the other frames.
+  // For normal GFs remove the score for the GF itself unless this is
+  // also a key frame in which case it has already been accounted for.
+  if (rc->source_alt_ref_pending) {
+    gf_group_error_left = gf_group_err - mod_frame_err;
+  } else if (is_key_frame == 0) {
+    gf_group_error_left = gf_group_err - gf_first_frame_err;
+  } else {
+    gf_group_error_left = gf_group_err;
+  }
+
+  // Allocate bits to each of the frames in the GF group.
+  allocate_gf_group_bits(cpi, gf_group_bits, gf_group_error_left, gf_arf_bits);
+
+  // Reset the file position.
+  reset_fpf_position(twopass, start_pos);
+
+  // Calculate a section intra ratio used in setting max loop filter.
+  if (cpi->common.frame_type != KEY_FRAME) {
+    twopass->section_intra_rating = calculate_section_intra_ratio(
+        start_pos, twopass->stats_in_end, rc->baseline_gf_interval);
+  }
+
+  if (oxcf->resize_mode == RESIZE_DYNAMIC) {
+    // Default to starting GF groups at normal frame size.
+    cpi->rc.next_frame_size_selector = UNSCALED;
+  }
+}
+
+// Threshold for use of the lagging second reference frame. High second ref
+// usage may point to a transient event like a flash or occlusion rather than
+// a real scene cut.
+#define SECOND_REF_USAGE_THRESH 0.1
+// Minimum % intra coding observed in first pass (1.0 = 100%)
+#define MIN_INTRA_LEVEL 0.25
+// Minimum ratio between the % of intra coding and inter coding in the first
+// pass after discounting neutral blocks (discounting neutral blocks in this
+// way helps catch scene cuts in clips with very flat areas or letter box
+// format clips with image padding).
+#define INTRA_VS_INTER_THRESH 2.0
+// Hard threshold where the first pass chooses intra for almost all blocks.
+// In such a case even if the frame is not a scene cut coding a key frame
+// may be a good option.
+#define VERY_LOW_INTER_THRESH 0.05
+// Maximum threshold for the relative ratio of intra error score vs best
+// inter error score.
+#define KF_II_ERR_THRESHOLD 2.5
+// In real scene cuts there is almost always a sharp change in the intra
+// or inter error score.
+#define ERR_CHANGE_THRESHOLD 0.4
+// For real scene cuts we expect an improvement in the intra inter error
+// ratio in the next frame.
+#define II_IMPROVEMENT_THRESHOLD 3.5
+#define KF_II_MAX 128.0
+
+static int test_candidate_kf(TWO_PASS *twopass,
+                             const FIRSTPASS_STATS *last_frame,
+                             const FIRSTPASS_STATS *this_frame,
+                             const FIRSTPASS_STATS *next_frame) {
+  int is_viable_kf = 0;
+  double pcnt_intra = 1.0 - this_frame->pcnt_inter;
+  double modified_pcnt_inter =
+      this_frame->pcnt_inter - this_frame->pcnt_neutral;
+
+  // Does the frame satisfy the primary criteria of a key frame?
+  // See above for an explanation of the test criteria.
+  // If so, then examine how well it predicts subsequent frames.
+  if ((this_frame->pcnt_second_ref < SECOND_REF_USAGE_THRESH) &&
+      (next_frame->pcnt_second_ref < SECOND_REF_USAGE_THRESH) &&
+      ((this_frame->pcnt_inter < VERY_LOW_INTER_THRESH) ||
+       ((pcnt_intra > MIN_INTRA_LEVEL) &&
+        (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
+        ((this_frame->intra_error /
+          DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) <
+         KF_II_ERR_THRESHOLD) &&
+        ((fabs(last_frame->coded_error - this_frame->coded_error) /
+              DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
+          ERR_CHANGE_THRESHOLD) ||
+         (fabs(last_frame->intra_error - this_frame->intra_error) /
+              DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
+          ERR_CHANGE_THRESHOLD) ||
+         ((next_frame->intra_error /
+           DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
+          II_IMPROVEMENT_THRESHOLD))))) {
+    int i;
+    const FIRSTPASS_STATS *start_pos = twopass->stats_in;
+    FIRSTPASS_STATS local_next_frame = *next_frame;
+    double boost_score = 0.0;
+    double old_boost_score = 0.0;
+    double decay_accumulator = 1.0;
+
+    // Examine how well the key frame predicts subsequent frames.
+    for (i = 0; i < 16; ++i) {
+      double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error /
+                             DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
+
+      if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;
+
+      // Cumulative effect of decay in prediction quality.
+      if (local_next_frame.pcnt_inter > 0.85)
+        decay_accumulator *= local_next_frame.pcnt_inter;
+      else
+        decay_accumulator *= (0.85 + local_next_frame.pcnt_inter) / 2.0;
+
+      // Keep a running total.
+      boost_score += (decay_accumulator * next_iiratio);
+
+      // Test various breakout clauses.
+      if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
+          (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) <
+            0.20) &&
+           (next_iiratio < 3.0)) ||
+          ((boost_score - old_boost_score) < 3.0) ||
+          (local_next_frame.intra_error < 200)) {
+        break;
+      }
+
+      old_boost_score = boost_score;
+
+      // Get the next frame details
+      if (EOF == input_stats(twopass, &local_next_frame)) break;
+    }
+
+    // If there is tolerable prediction for at least the next 3 frames,
+    // flag this as a viable key frame; otherwise discard it and move on.
+    if (boost_score > 30.0 && (i > 3)) {
+      is_viable_kf = 1;
+    } else {
+      // Reset the file position
+      reset_fpf_position(twopass, start_pos);
+
+      is_viable_kf = 0;
+    }
+  }
+
+  return is_viable_kf;
+}
+
+#define FRAMES_TO_CHECK_DECAY 8
+
+static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+  int i, j;
+  RATE_CONTROL *const rc = &cpi->rc;
+  TWO_PASS *const twopass = &cpi->twopass;
+  GF_GROUP *const gf_group = &twopass->gf_group;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const FIRSTPASS_STATS first_frame = *this_frame;
+  const FIRSTPASS_STATS *const start_position = twopass->stats_in;
+  FIRSTPASS_STATS next_frame;
+  FIRSTPASS_STATS last_frame;
+  int kf_bits = 0;
+  int loop_decay_counter = 0;
+  double decay_accumulator = 1.0;
+  double av_decay_accumulator = 0.0;
+  double zero_motion_accumulator = 1.0;
+  double boost_score = 0.0;
+  double kf_mod_err = 0.0;
+  double kf_group_err = 0.0;
+  double recent_loop_decay[FRAMES_TO_CHECK_DECAY];
+
+  vp10_zero(next_frame);
+
+  cpi->common.frame_type = KEY_FRAME;
+
+  // Reset the GF group data structures.
+  vp10_zero(*gf_group);
+
+  // Is this a key frame forced by the interval?
+  rc->this_key_frame_forced = rc->next_key_frame_forced;
+
+  // Clear the alt ref active flag and last group multi arf flags as they
+  // can never be set for a key frame.
+  rc->source_alt_ref_active = 0;
+  cpi->multi_arf_last_grp_enabled = 0;
+
+  // KF is always a GF so clear frames till next gf counter.
+  rc->frames_till_gf_update_due = 0;
+
+  rc->frames_to_key = 1;
+
+  twopass->kf_group_bits = 0;        // Total bits available to kf group
+  twopass->kf_group_error_left = 0;  // Group modified error score.
+
+  kf_mod_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
+
+  // Initialize the decay rates for the recent frames to check
+  for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0;
+
+  // Find the next keyframe.
+  i = 0;
+  while (twopass->stats_in < twopass->stats_in_end &&
+         rc->frames_to_key < cpi->oxcf.key_freq) {
+    // Accumulate kf group error.
+    kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame);
+
+    // Load the next frame's stats.
+    last_frame = *this_frame;
+    input_stats(twopass, this_frame);
+
+    // Provided that we are not at the end of the file...
+    if (cpi->oxcf.auto_key && twopass->stats_in < twopass->stats_in_end) {
+      double loop_decay_rate;
+
+      // Check for a scene cut.
+      if (test_candidate_kf(twopass, &last_frame, this_frame,
+                            twopass->stats_in))
+        break;
+
+      // How fast is the prediction quality decaying?
+      loop_decay_rate = get_prediction_decay_rate(cpi, twopass->stats_in);
+
+      // Here we are interested in decay over the recent past, rather
+      // than, as elsewhere, in decay of prediction quality since the
+      // last GF or KF.
+      recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate;
+      decay_accumulator = 1.0;
+      for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j)
+        decay_accumulator *= recent_loop_decay[j];
+
+      // Special check for transition or high motion followed by a
+      // static scene.
+      if (detect_transition_to_still(cpi, i, cpi->oxcf.key_freq - i,
+                                     loop_decay_rate, decay_accumulator))
+        break;
+
+      // Step on to the next frame.
+      ++rc->frames_to_key;
+
+      // If we don't have a real key frame within the next two
+      // key_freq intervals then break out of the loop.
+      if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) break;
+    } else {
+      ++rc->frames_to_key;
+    }
+    ++i;
+  }
+
+  // If there is a max kf interval set by the user we must obey it.
+  // We already breakout of the loop above at 2x max.
+  // This code centers the extra kf if the actual natural interval
+  // is between 1x and 2x.
+  if (cpi->oxcf.auto_key && rc->frames_to_key > cpi->oxcf.key_freq) {
+    FIRSTPASS_STATS tmp_frame = first_frame;
+
+    rc->frames_to_key /= 2;
+
+    // Reset to the start of the group.
+    reset_fpf_position(twopass, start_position);
+
+    kf_group_err = 0.0;
+
+    // Rescan to get the correct error data for the forced kf group.
+    for (i = 0; i < rc->frames_to_key; ++i) {
+      kf_group_err += calculate_modified_err(cpi, twopass, oxcf, &tmp_frame);
+      input_stats(twopass, &tmp_frame);
+    }
+    rc->next_key_frame_forced = 1;
+  } else if (twopass->stats_in == twopass->stats_in_end ||
+             rc->frames_to_key >= cpi->oxcf.key_freq) {
+    rc->next_key_frame_forced = 1;
+  } else {
+    rc->next_key_frame_forced = 0;
+  }
+
+  // Special case for the last key frame of the file.
+  if (twopass->stats_in >= twopass->stats_in_end) {
+    // Accumulate kf group error.
+    kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame);
+  }
+
+  // Calculate the number of bits that should be assigned to the kf group.
+  if (twopass->bits_left > 0 && twopass->modified_error_left > 0.0) {
+    // Maximum number of bits for a single normal frame (not key frame).
+    const int max_bits = frame_max_bits(rc, &cpi->oxcf);
+
+    // Maximum number of bits allocated to the key frame group.
+    int64_t max_grp_bits;
+
+    // Default allocation based on bits left and relative
+    // complexity of the section.
+    twopass->kf_group_bits = (int64_t)(
+        twopass->bits_left * (kf_group_err / twopass->modified_error_left));
+
+    // Clip based on maximum per frame rate defined by the user.
+    max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
+    if (twopass->kf_group_bits > max_grp_bits)
+      twopass->kf_group_bits = max_grp_bits;
+  } else {
+    twopass->kf_group_bits = 0;
+  }
+  twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits);
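+  // Worked example (assumed numbers): bits_left = 10000000 with this kf
+  // group contributing kf_group_err = 50.0 of modified_error_left = 500.0
+  // gives kf_group_bits = 10000000 * (50 / 500) = 1000000, subject to the
+  // max_bits * frames_to_key clip above.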
+
+  // Reset the first pass file position.
+  reset_fpf_position(twopass, start_position);
+
+  // Scan through the kf group collating various stats used to determine
+  // how many bits to spend on it.
+  decay_accumulator = 1.0;
+  boost_score = 0.0;
+  for (i = 0; i < (rc->frames_to_key - 1); ++i) {
+    if (EOF == input_stats(twopass, &next_frame)) break;
+
+    // Monitor for static sections.
+    zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+                                     get_zero_motion_factor(cpi, &next_frame));
+
+    // Not all frames in the group are necessarily used in calculating boost.
+    if ((i <= rc->max_gf_interval) ||
+        ((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) {
+      const double frame_boost =
+          calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST);
+
+      // How fast is prediction quality decaying.
+      if (!detect_flash(twopass, 0)) {
+        const double loop_decay_rate =
+            get_prediction_decay_rate(cpi, &next_frame);
+        decay_accumulator *= loop_decay_rate;
+        decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
+        av_decay_accumulator += decay_accumulator;
+        ++loop_decay_counter;
+      }
+      boost_score += (decay_accumulator * frame_boost);
+    }
+  }
+  // Guard against division by zero when no frames contributed to the decay
+  // average (e.g. a flash was detected on every frame checked).
+  if (loop_decay_counter > 0)
+    av_decay_accumulator /= (double)loop_decay_counter;
+
+  reset_fpf_position(twopass, start_position);
+
+  // Store the zero motion percentage
+  twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+
+  // Calculate a section intra ratio used in setting max loop filter.
+  twopass->section_intra_rating = calculate_section_intra_ratio(
+      start_position, twopass->stats_in_end, rc->frames_to_key);
+
+  // Apply various clamps for min and max boost
+  rc->kf_boost = (int)(av_decay_accumulator * boost_score);
+  rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3));
+  rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
+
+  // Work out how many bits to allocate for the key frame itself.
+  kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
+                                 twopass->kf_group_bits);
+
+  // Work out the fraction of the kf group bits reserved for the inter frames
+  // within the group after discounting the bits for the kf itself.
+  if (twopass->kf_group_bits) {
+    twopass->kfgroup_inter_fraction =
+        (double)(twopass->kf_group_bits - kf_bits) /
+        (double)twopass->kf_group_bits;
+  } else {
+    twopass->kfgroup_inter_fraction = 1.0;
+  }
+
+  twopass->kf_group_bits -= kf_bits;
+
+  // Save the bits to spend on the key frame.
+  gf_group->bit_allocation[0] = kf_bits;
+  gf_group->update_type[0] = KF_UPDATE;
+  gf_group->rf_level[0] = KF_STD;
+
+  // Note the total error score of the kf group minus the key frame itself.
+  twopass->kf_group_error_left = (int)(kf_group_err - kf_mod_err);
+
+  // Adjust the count of total modified error left.
+  // The count of bits left is adjusted elsewhere based on real coded frame
+  // sizes.
+  twopass->modified_error_left -= kf_group_err;
+
+  if (oxcf->resize_mode == RESIZE_DYNAMIC) {
+    // Default to normal-sized frame on keyframes.
+    cpi->rc.next_frame_size_selector = UNSCALED;
+  }
+}
+
+// Define the reference buffers that will be updated post encode.
+static void configure_buffer_updates(VP10_COMP *cpi) {
+  TWO_PASS *const twopass = &cpi->twopass;
+
+  // TODO(Wei-Ting): Should we define another function to take care of
+  // cpi->rc.is_$Source_Type so that this function matches its comment?
+
+  cpi->rc.is_src_frame_alt_ref = 0;
+#if CONFIG_EXT_REFS
+  cpi->rc.is_bwd_ref_frame = 0;
+  cpi->rc.is_last_bipred_frame = 0;
+  cpi->rc.is_bipred_frame = 0;
+  cpi->rc.is_src_frame_ext_arf = 0;
+#endif  // CONFIG_EXT_REFS
+
+  switch (twopass->gf_group.update_type[twopass->gf_group.index]) {
+    case KF_UPDATE:
+#if CONFIG_EXT_REFS
+      cpi->refresh_bwd_ref_frame = 1;
+#endif  // CONFIG_EXT_REFS
+      cpi->refresh_last_frame = 1;
+      cpi->refresh_golden_frame = 1;
+      cpi->refresh_alt_ref_frame = 1;
+      break;
+
+    case LF_UPDATE:
+#if CONFIG_EXT_REFS
+      // If we have extra ALT_REFs, we can use the farthest ALT (ALT0) as
+      // the BWD_REF.
+      if (cpi->num_extra_arfs) {
+        int tmp = cpi->bwd_fb_idx;
+
+        cpi->rc.is_bwd_ref_frame = 1;
+        cpi->bwd_fb_idx = cpi->alt_fb_idx;
+        cpi->alt_fb_idx = cpi->arf_map[0];
+        cpi->arf_map[0] = tmp;
+      } else {
+        cpi->rc.is_bwd_ref_frame = 0;
+      }
+#endif  // CONFIG_EXT_REFS
+      cpi->refresh_last_frame = 1;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_alt_ref_frame = 0;
+      break;
+
+    case GF_UPDATE:
+#if CONFIG_EXT_REFS
+      cpi->refresh_bwd_ref_frame = 0;
+#endif  // CONFIG_EXT_REFS
+      cpi->refresh_last_frame = 1;
+      cpi->refresh_golden_frame = 1;
+      cpi->refresh_alt_ref_frame = 0;
+      break;
+
+    case OVERLAY_UPDATE:
+      cpi->refresh_last_frame = 0;
+      cpi->refresh_golden_frame = 1;
+#if CONFIG_EXT_REFS
+      cpi->refresh_bwd_ref_frame = 0;
+#endif  // CONFIG_EXT_REFS
+      cpi->refresh_alt_ref_frame = 0;
+      cpi->rc.is_src_frame_alt_ref = 1;
+      break;
+
+    case ARF_UPDATE:
+#if CONFIG_EXT_REFS
+      cpi->refresh_bwd_ref_frame = 1;
+#endif  // CONFIG_EXT_REFS
+      cpi->refresh_last_frame = 0;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_alt_ref_frame = 1;
+      break;
+
+#if CONFIG_EXT_REFS
+    case BRF_UPDATE:
+      cpi->refresh_last_frame = 0;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_bwd_ref_frame = 1;
+      cpi->refresh_alt_ref_frame = 0;
+      cpi->rc.is_bwd_ref_frame = 1;
+      if (cpi->num_extra_arfs) {
+        // Allow the BRF to use the farthest ALT_REF (ALT0) as BWD_REF by
+        // swapping the virtual indices.
+        // NOTE: The indices will be swapped back after this frame is encoded
+        //       (in vp10_update_reference_frames()).
+        int tmp = cpi->bwd_fb_idx;
+        cpi->bwd_fb_idx = cpi->alt_fb_idx;
+        cpi->alt_fb_idx = cpi->arf_map[0];
+        cpi->arf_map[0] = tmp;
+      }
+      break;
+    case LAST_BIPRED_UPDATE:
+      cpi->refresh_last_frame = 0;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_bwd_ref_frame = 0;
+      cpi->refresh_alt_ref_frame = 0;
+      cpi->rc.is_last_bipred_frame = 1;
+      break;
+
+    case BIPRED_UPDATE:
+      cpi->refresh_last_frame = 1;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_bwd_ref_frame = 0;
+      cpi->refresh_alt_ref_frame = 0;
+      cpi->rc.is_bipred_frame = 1;
+      break;
+
+    case INTNL_OVERLAY_UPDATE:
+      cpi->refresh_last_frame = 1;
+      cpi->refresh_golden_frame = 0;
+      cpi->refresh_bwd_ref_frame = 0;
+      cpi->refresh_alt_ref_frame = 0;
+      cpi->rc.is_src_frame_alt_ref = 1;
+      cpi->rc.is_src_frame_ext_arf = 1;
+      break;
+#endif  // CONFIG_EXT_REFS
+
+    default: assert(0); break;
+  }
+}
+
+static int is_skippable_frame(const VP10_COMP *cpi) {
+  // If no non-zero motion vector was detected in the first pass for the
+  // current frame, nor for its previous and forward frames, then this frame
+  // can be skipped for the partition check, and the partition size is
+  // assigned according to the variance.
+  const TWO_PASS *const twopass = &cpi->twopass;
+
+  return (!frame_is_intra_only(&cpi->common) &&
+          twopass->stats_in - 2 > twopass->stats_in_start &&
+          twopass->stats_in < twopass->stats_in_end &&
+          (twopass->stats_in - 1)->pcnt_inter -
+                  (twopass->stats_in - 1)->pcnt_motion ==
+              1 &&
+          (twopass->stats_in - 2)->pcnt_inter -
+                  (twopass->stats_in - 2)->pcnt_motion ==
+              1 &&
+          twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
+}
+
+void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  TWO_PASS *const twopass = &cpi->twopass;
+  GF_GROUP *const gf_group = &twopass->gf_group;
+  int frames_left;
+  FIRSTPASS_STATS this_frame;
+
+  int target_rate;
+
+  frames_left = (int)(twopass->total_stats.count - cm->current_video_frame);
+
+  if (!twopass->stats_in) return;
+
+  // If this is an ARF frame then we don't want to read the stats file or
+  // advance the input pointer as we already have what we need.
+  if (gf_group->update_type[gf_group->index] == ARF_UPDATE) {
+    configure_buffer_updates(cpi);
+    target_rate = gf_group->bit_allocation[gf_group->index];
+    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+    rc->base_frame_target = target_rate;
+
+    cm->frame_type = INTER_FRAME;
+
+    // Do the firstpass stats indicate that this frame is skippable for the
+    // partition search?
+    if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2) {
+      cpi->partition_search_skippable_frame = is_skippable_frame(cpi);
+    }
+
+    return;
+  }
+
+  vpx_clear_system_state();
+
+  if (cpi->oxcf.rc_mode == VPX_Q) {
+    twopass->active_worst_quality = cpi->oxcf.cq_level;
+  } else if (cm->current_video_frame == 0) {
+    // Special case code for first frame.
+    const int section_target_bandwidth =
+        (int)(twopass->bits_left / frames_left);
+    const double section_length = twopass->total_left_stats.count;
+    const double section_error =
+        twopass->total_left_stats.coded_error / section_length;
+    const double section_intra_skip =
+        twopass->total_left_stats.intra_skip_pct / section_length;
+    const double section_inactive_zone =
+        (twopass->total_left_stats.inactive_zone_rows * 2) /
+        ((double)cm->mb_rows * section_length);
+    const int tmp_q = get_twopass_worst_quality(
+        cpi, section_error, section_intra_skip + section_inactive_zone,
+        section_target_bandwidth, DEFAULT_GRP_WEIGHT);
+
+    twopass->active_worst_quality = tmp_q;
+    twopass->baseline_active_worst_quality = tmp_q;
+    rc->ni_av_qi = tmp_q;
+    rc->last_q[INTER_FRAME] = tmp_q;
+    rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth);
+    rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
+    rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2;
+    rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
+  }
+
+  vp10_zero(this_frame);
+  if (EOF == input_stats(twopass, &this_frame)) return;
+
+  // Set the frame content type flag.
+  if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH)
+    twopass->fr_content_type = FC_GRAPHICS_ANIMATION;
+  else
+    twopass->fr_content_type = FC_NORMAL;
+
+  // Keyframe and section processing.
+  if (rc->frames_to_key == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY)) {
+    FIRSTPASS_STATS this_frame_copy;
+    this_frame_copy = this_frame;
+    // Define next KF group and assign bits to it.
+    find_next_key_frame(cpi, &this_frame);
+    this_frame = this_frame_copy;
+  } else {
+    cm->frame_type = INTER_FRAME;
+  }
+
+  // Define a new GF/ARF group. (Should always enter here for key frames).
+  if (rc->frames_till_gf_update_due == 0) {
+    define_gf_group(cpi, &this_frame);
+
+    rc->frames_till_gf_update_due = rc->baseline_gf_interval;
+
+#if ARF_STATS_OUTPUT
+    {
+      FILE *fpfile;
+      fpfile = fopen("arf.stt", "a");
+      ++arf_count;
+      fprintf(fpfile, "%10u %10d %10d %10d %10d\n", cm->current_video_frame,
+              rc->frames_till_gf_update_due, rc->kf_boost, arf_count,
+              rc->gfu_boost);
+
+      fclose(fpfile);
+    }
+#endif
+  }
+
+  configure_buffer_updates(cpi);
+
+  // Do the firstpass stats indicate that this frame is skippable for the
+  // partition search?
+  if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2) {
+    cpi->partition_search_skippable_frame = is_skippable_frame(cpi);
+  }
+
+  target_rate = gf_group->bit_allocation[gf_group->index];
+
+  if (cpi->common.frame_type == KEY_FRAME)
+    target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate);
+  else
+    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+
+  rc->base_frame_target = target_rate;
+
+  {
+    const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
+                            ? cpi->initial_mbs
+                            : cpi->common.MBs;
+    // The multiplication by 256 reverses a scaling factor of (>> 8)
+    // applied when combining MB error values for the frame.
+    twopass->mb_av_energy =
+        log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
+  }
+
+  // Update the total stats remaining structure.
+  subtract_stats(&twopass->total_left_stats, &this_frame);
+}
+
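The mb_av_energy computation above multiplies by 256.0 to undo the (>> 8) scaling applied when the first pass combines MB error values. A tiny numeric sketch with made-up error totals shows the round trip:

```c
#include <math.h>
#include <stdio.h>

int main(void) {
  // Hypothetical frame error total as accumulated in the first pass, where
  // each per-MB error was scaled down by (>> 8) before being summed.
  const double scaled_frame_error = 1200.0;
  const int num_mbs = 3600;  // e.g. a 1280x720 frame (80 x 45 macroblocks)

  // Multiplying by 256 restores the original magnitude before averaging;
  // the + 1.0 keeps the log() argument positive for all-zero frames.
  const double mb_av_energy =
      log(((scaled_frame_error * 256.0) / num_mbs) + 1.0);

  printf("mb_av_energy = %f\n", mb_av_energy);  // log(86.33...) ~= 4.46
  return 0;
}
```
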
+#define MINQ_ADJ_LIMIT 48
+#define MINQ_ADJ_LIMIT_CQ 20
+#define HIGH_UNDERSHOOT_RATIO 2
+void vp10_twopass_postencode_update(VP10_COMP *cpi) {
+  TWO_PASS *const twopass = &cpi->twopass;
+  RATE_CONTROL *const rc = &cpi->rc;
+  const int bits_used = rc->base_frame_target;
+
+  // VBR correction is done through rc->vbr_bits_off_target. Based on the
+  // sign of this value, a limited % adjustment is made to the target rate
+  // of subsequent frames, to try and push it back towards 0. This method
+  // is designed to prevent extreme behaviour at the end of a clip
+  // or group of frames.
+  rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size;
+  twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0);
+
+  // Calculate the pct rc error.
+  if (rc->total_actual_bits) {
+    rc->rate_error_estimate =
+        (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
+    rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100);
+  } else {
+    rc->rate_error_estimate = 0;
+  }
+
+  if (cpi->common.frame_type != KEY_FRAME) {
+    twopass->kf_group_bits -= bits_used;
+    twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct;
+  }
+  twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0);
+
+  // Increment the gf group index ready for the next frame.
+  ++twopass->gf_group.index;
+
+  // If the rate control is drifting, consider adjusting the min or max Q.
+  if ((cpi->oxcf.rc_mode != VPX_Q) &&
+      (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) &&
+      !cpi->rc.is_src_frame_alt_ref) {
+    const int maxq_adj_limit =
+        rc->worst_quality - twopass->active_worst_quality;
+    const int minq_adj_limit =
+        (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
+
+    // Undershoot.
+    if (rc->rate_error_estimate > cpi->oxcf.under_shoot_pct) {
+      --twopass->extend_maxq;
+      if (rc->rolling_target_bits >= rc->rolling_actual_bits)
+        ++twopass->extend_minq;
+      // Overshoot.
+    } else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) {
+      --twopass->extend_minq;
+      if (rc->rolling_target_bits < rc->rolling_actual_bits)
+        ++twopass->extend_maxq;
+    } else {
+      // Adjustment for extreme local overshoot.
+      if (rc->projected_frame_size > (2 * rc->base_frame_target) &&
+          rc->projected_frame_size > (2 * rc->avg_frame_bandwidth))
+        ++twopass->extend_maxq;
+
+      // Unwind undershoot or overshoot adjustment.
+      if (rc->rolling_target_bits < rc->rolling_actual_bits)
+        --twopass->extend_minq;
+      else if (rc->rolling_target_bits > rc->rolling_actual_bits)
+        --twopass->extend_maxq;
+    }
+
+    twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit);
+    twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);
+
+    // If there is a big and unexpected undershoot then feed the extra
+    // bits back in quickly. One situation where this may happen is if a
+    // frame is unexpectedly almost perfectly predicted by the ARF or GF
+    // but not very well predicted by the previous frame.
+    if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
+      int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
+      if (rc->projected_frame_size < fast_extra_thresh) {
+        rc->vbr_bits_off_target_fast +=
+            fast_extra_thresh - rc->projected_frame_size;
+        rc->vbr_bits_off_target_fast =
+            VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+
+        // Fast adaptation of minQ if necessary to use up the extra bits.
+        if (rc->avg_frame_bandwidth) {
+          twopass->extend_minq_fast =
+              (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
+        }
+        twopass->extend_minq_fast = VPXMIN(
+            twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
+      } else if (rc->vbr_bits_off_target_fast) {
+        twopass->extend_minq_fast = VPXMIN(
+            twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
+      } else {
+        twopass->extend_minq_fast = 0;
+      }
+    }
+  }
+}
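
A worked example of the rate-error feedback in vp10_twopass_postencode_update() may help: the percentage error is an integer division of the accumulated VBR drift by total bits, and its sign picks the undershoot or overshoot branch. A minimal self-contained sketch, with hypothetical bit totals and a local helper standing in for the codebase's clamp():

```c
#include <stdint.h>
#include <stdio.h>

// Local stand-in for the clamp() helper used above.
static int clamp_int(int value, int low, int high) {
  return value < low ? low : (value > high ? high : value);
}

int main(void) {
  // Hypothetical running totals after a few hundred encoded frames.
  const int64_t vbr_bits_off_target = -150000;  // 150 kbit of overshoot
  const int64_t total_actual_bits = 6000000;

  // Percentage rate-control error, clamped to [-100, 100] as above.
  const int rate_error_estimate = clamp_int(
      (int)((vbr_bits_off_target * 100) / total_actual_bits), -100, 100);
  printf("rate_error_estimate = %d%%\n", rate_error_estimate);  // -2%

  // With, say, over_shoot_pct == 1, the -2% error takes the overshoot
  // branch above: extend_minq is eased down and, if actual bits are also
  // running ahead of target, extend_maxq is pushed up to raise Q on
  // subsequent frames.
  return 0;
}
```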
diff --git a/av1/encoder/firstpass.h b/av1/encoder/firstpass.h
new file mode 100644
index 0000000..5623540
--- /dev/null
+++ b/av1/encoder/firstpass.h
@@ -0,0 +1,202 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_FIRSTPASS_H_
+#define VP10_ENCODER_FIRSTPASS_H_
+
+#include "av1/encoder/lookahead.h"
+#include "av1/encoder/ratectrl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if CONFIG_FP_MB_STATS
+
+#define FPMB_DCINTRA_MASK 0x01
+
+#define FPMB_MOTION_ZERO_MASK 0x02
+#define FPMB_MOTION_LEFT_MASK 0x04
+#define FPMB_MOTION_RIGHT_MASK 0x08
+#define FPMB_MOTION_UP_MASK 0x10
+#define FPMB_MOTION_DOWN_MASK 0x20
+
+#define FPMB_ERROR_SMALL_MASK 0x40
+#define FPMB_ERROR_LARGE_MASK 0x80
+#define FPMB_ERROR_SMALL_TH 2000
+#define FPMB_ERROR_LARGE_TH 48000
+
+typedef struct {
+  uint8_t *mb_stats_start;
+  uint8_t *mb_stats_end;
+} FIRSTPASS_MB_STATS;
+#endif
+
+#if CONFIG_EXT_REFS
+// Length of the bi-predictive frame group (BFG)
+// NOTE: Currently each BFG contains one backward ref (BWF) frame plus a certain
+//       number of bi-predictive frames.
+#define BFG_INTERVAL 2
+// The maximum number of extra ALT_REFs
+// NOTE: This number cannot be greater than 2 or the reference frame buffer will
+//       overflow.
+#define MAX_EXT_ARFS 2
+#define MIN_EXT_ARF_INTERVAL 4
+#endif  // CONFIG_EXT_REFS
+
+#define VLOW_MOTION_THRESHOLD 950
+
+typedef struct {
+  double frame;
+  double weight;
+  double intra_error;
+  double coded_error;
+  double sr_coded_error;
+  double pcnt_inter;
+  double pcnt_motion;
+  double pcnt_second_ref;
+  double pcnt_neutral;
+  double intra_skip_pct;
+  double inactive_zone_rows;  // Image mask rows top and bottom.
+  double inactive_zone_cols;  // Image mask columns at left and right edges.
+  double MVr;
+  double mvr_abs;
+  double MVc;
+  double mvc_abs;
+  double MVrv;
+  double MVcv;
+  double mv_in_out_count;
+  double new_mv_count;
+  double duration;
+  double count;
+} FIRSTPASS_STATS;
+
+typedef enum {
+  KF_UPDATE = 0,
+  LF_UPDATE = 1,
+  GF_UPDATE = 2,
+  ARF_UPDATE = 3,
+  OVERLAY_UPDATE = 4,
+#if CONFIG_EXT_REFS
+  BRF_UPDATE = 5,            // Backward Reference Frame
+  LAST_BIPRED_UPDATE = 6,    // Last Bi-predictive Frame
+  BIPRED_UPDATE = 7,         // Bi-predictive Frame, but not the last one
+  INTNL_OVERLAY_UPDATE = 8,  // Internal Overlay Frame
+  FRAME_UPDATE_TYPES = 9
+#else
+  FRAME_UPDATE_TYPES = 5
+#endif  // CONFIG_EXT_REFS
+} FRAME_UPDATE_TYPE;
+
+#define FC_ANIMATION_THRESH 0.15
+typedef enum {
+  FC_NORMAL = 0,
+  FC_GRAPHICS_ANIMATION = 1,
+  FRAME_CONTENT_TYPES = 2
+} FRAME_CONTENT_TYPE;
+
+typedef struct {
+  unsigned char index;
+  RATE_FACTOR_LEVEL rf_level[(MAX_LAG_BUFFERS * 2) + 1];
+  FRAME_UPDATE_TYPE update_type[(MAX_LAG_BUFFERS * 2) + 1];
+  unsigned char arf_src_offset[(MAX_LAG_BUFFERS * 2) + 1];
+  unsigned char arf_update_idx[(MAX_LAG_BUFFERS * 2) + 1];
+  unsigned char arf_ref_idx[(MAX_LAG_BUFFERS * 2) + 1];
+#if CONFIG_EXT_REFS
+  unsigned char brf_src_offset[(MAX_LAG_BUFFERS * 2) + 1];
+  unsigned char bidir_pred_enabled[(MAX_LAG_BUFFERS * 2) + 1];
+#endif  // CONFIG_EXT_REFS
+  int bit_allocation[(MAX_LAG_BUFFERS * 2) + 1];
+} GF_GROUP;
+
+typedef struct {
+  unsigned int section_intra_rating;
+  FIRSTPASS_STATS total_stats;
+  FIRSTPASS_STATS this_frame_stats;
+  const FIRSTPASS_STATS *stats_in;
+  const FIRSTPASS_STATS *stats_in_start;
+  const FIRSTPASS_STATS *stats_in_end;
+  FIRSTPASS_STATS total_left_stats;
+  int first_pass_done;
+  int64_t bits_left;
+  double modified_error_min;
+  double modified_error_max;
+  double modified_error_left;
+  double mb_av_energy;
+
+#if CONFIG_FP_MB_STATS
+  uint8_t *frame_mb_stats_buf;
+  uint8_t *this_frame_mb_stats;
+  FIRSTPASS_MB_STATS firstpass_mb_stats;
+#endif
+  // An indication of the content type of the current frame
+  FRAME_CONTENT_TYPE fr_content_type;
+
+  // Projected total bits available for a key frame group of frames
+  int64_t kf_group_bits;
+
+  // Error score of frames still to be coded in kf group
+  int64_t kf_group_error_left;
+
+  // The fraction of a kf group's total bits allocated to the inter frames
+  double kfgroup_inter_fraction;
+
+  int sr_update_lag;
+
+  int kf_zeromotion_pct;
+  int last_kfgroup_zeromotion_pct;
+  int gf_zeromotion_pct;
+  int active_worst_quality;
+  int baseline_active_worst_quality;
+  int extend_minq;
+  int extend_maxq;
+  int extend_minq_fast;
+
+  GF_GROUP gf_group;
+} TWO_PASS;
+
+struct VP10_COMP;
+
+void vp10_init_first_pass(struct VP10_COMP *cpi);
+void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi);
+void vp10_first_pass(struct VP10_COMP *cpi,
+                     const struct lookahead_entry *source);
+void vp10_end_first_pass(struct VP10_COMP *cpi);
+
+void vp10_init_second_pass(struct VP10_COMP *cpi);
+void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi);
+
+// Post-encode update of the rate control parameters for 2-pass
+void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+
+void vp10_init_subsampling(struct VP10_COMP *cpi);
+
+void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
+                               int *scaled_frame_height);
+
+#if CONFIG_EXT_REFS
+static INLINE int get_number_of_extra_arfs(int interval, int arf_pending) {
+  if (arf_pending && MAX_EXT_ARFS > 0)
+    return interval >= MIN_EXT_ARF_INTERVAL * (MAX_EXT_ARFS + 1)
+               ? MAX_EXT_ARFS
+               : interval >= MIN_EXT_ARF_INTERVAL * MAX_EXT_ARFS
+                     ? MAX_EXT_ARFS - 1
+                     : 0;
+  else
+    return 0;
+}
+#endif
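
With MIN_EXT_ARF_INTERVAL at 4 and MAX_EXT_ARFS at 2 as defined above, the nested conditional in get_number_of_extra_arfs() resolves to a simple threshold mapping. A minimal sketch, assuming a build with CONFIG_EXT_REFS enabled so the helper is compiled in:

```c
#include <assert.h>
#include "av1/encoder/firstpass.h"

int main(void) {
  assert(get_number_of_extra_arfs(3, 1) == 0);   // below 4 * 2: no extras
  assert(get_number_of_extra_arfs(8, 1) == 1);   // >= 4 * 2: one extra ARF
  assert(get_number_of_extra_arfs(12, 1) == 2);  // >= 4 * 3: two extra ARFs
  assert(get_number_of_extra_arfs(12, 0) == 0);  // no ARF pending: none
  return 0;
}
```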
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_FIRSTPASS_H_
diff --git a/av1/encoder/global_motion.c b/av1/encoder/global_motion.c
new file mode 100644
index 0000000..cda6aaf
--- /dev/null
+++ b/av1/encoder/global_motion.c
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <math.h>
+#include <assert.h>
+
+#include "av1/common/warped_motion.h"
+
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/global_motion.h"
+#include "av1/encoder/corner_detect.h"
+#include "av1/encoder/corner_match.h"
+#include "av1/encoder/ransac.h"
+
+#define MAX_CORNERS 4096
+#define MIN_INLIER_PROB 0.1
+
+static INLINE RansacType get_ransac_type(TransformationType type) {
+  switch (type) {
+    case HOMOGRAPHY: return ransacHomography;
+    case AFFINE: return ransacAffine;
+    case ROTZOOM: return ransacRotZoom;
+    case TRANSLATION: return ransacTranslation;
+    default: assert(0); return NULL;
+  }
+}
+
+// Computes global motion parameters by fitting a model using RANSAC.
+static int compute_global_motion_params(TransformationType type,
+                                        double *correspondences,
+                                        int num_correspondences, double *params,
+                                        int *inlier_map) {
+  int result;
+  int num_inliers = 0;
+  RansacType ransac = get_ransac_type(type);
+  if (ransac == NULL) return 0;
+
+  result = ransac(correspondences, num_correspondences, &num_inliers,
+                  inlier_map, params);
+  if (!result && num_inliers < MIN_INLIER_PROB * num_correspondences) {
+    result = 1;
+    num_inliers = 0;
+  }
+  return num_inliers;
+}
+
+int compute_global_motion_feature_based(TransformationType type,
+                                        YV12_BUFFER_CONFIG *frm,
+                                        YV12_BUFFER_CONFIG *ref,
+                                        double *params) {
+  int num_frm_corners, num_ref_corners;
+  int num_correspondences;
+  double *correspondences;
+  int num_inliers;
+  int frm_corners[2 * MAX_CORNERS], ref_corners[2 * MAX_CORNERS];
+  int *inlier_map = NULL;
+
+  // Compute interest points in both images using FAST features.
+  num_frm_corners = FastCornerDetect(frm->y_buffer, frm->y_width, frm->y_height,
+                                     frm->y_stride, frm_corners, MAX_CORNERS);
+  num_ref_corners = FastCornerDetect(ref->y_buffer, ref->y_width, ref->y_height,
+                                     ref->y_stride, ref_corners, MAX_CORNERS);
+
+  // Find correspondences between the two images.
+  correspondences =
+      (double *)malloc(num_frm_corners * 4 * sizeof(*correspondences));
+  if (!correspondences) return 0;
+  num_correspondences = determine_correspondence(
+      frm->y_buffer, (int *)frm_corners, num_frm_corners, ref->y_buffer,
+      (int *)ref_corners, num_ref_corners, frm->y_width, frm->y_height,
+      frm->y_stride, ref->y_stride, correspondences);
+
+  inlier_map = (int *)malloc(num_correspondences * sizeof(*inlier_map));
+  if (!inlier_map) {
+    free(correspondences);
+    return 0;
+  }
+  num_inliers = compute_global_motion_params(
+      type, correspondences, num_correspondences, params, inlier_map);
+  free(correspondences);
+  free(inlier_map);
+  return (num_inliers > 0);
+}
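
A minimal usage sketch for the pipeline above (FAST corners, correspondence matching, RANSAC fit). It assumes frm and ref are already-allocated, filled YV12_BUFFER_CONFIG frames, and that eight doubles are enough for any model up to a homography; the exact parameter count per TransformationType is an assumption here, defined alongside the warped-motion code.

```c
static void estimate_global_motion_example(YV12_BUFFER_CONFIG *frm,
                                           YV12_BUFFER_CONFIG *ref) {
  // Eight parameters is an assumed upper bound covering the ROTZOOM,
  // AFFINE and HOMOGRAPHY models.
  double params[8] = { 0.0 };

  if (compute_global_motion_feature_based(ROTZOOM, frm, ref, params)) {
    // Enough RANSAC inliers were found: params holds the fitted
    // rotation/zoom model mapping ref towards frm.
  } else {
    // Fewer than MIN_INLIER_PROB of the correspondences were inliers;
    // fall back to assuming zero global motion.
  }
}
```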
diff --git a/av1/encoder/global_motion.h b/av1/encoder/global_motion.h
new file mode 100644
index 0000000..ed088d6
--- /dev/null
+++ b/av1/encoder/global_motion.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_GLOBAL_MOTION_H_
+#define VP10_ENCODER_GLOBAL_MOTION_H_
+
+#include "aom/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Compute global motion parameters between two frames.
+int compute_global_motion_feature_based(TransformationType type,
+                                        YV12_BUFFER_CONFIG *frm,
+                                        YV12_BUFFER_CONFIG *ref,
+                                        double *params);
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+#endif  // VP10_ENCODER_GLOBAL_MOTION_H_
diff --git a/av1/encoder/hybrid_fwd_txfm.c b/av1/encoder/hybrid_fwd_txfm.c
new file mode 100644
index 0000000..ccfab0a
--- /dev/null
+++ b/av1/encoder/hybrid_fwd_txfm.c
@@ -0,0 +1,498 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "av1/common/idct.h"
+#include "av1/encoder/hybrid_fwd_txfm.h"
+
+static INLINE void fdct32x32(int rd_transform, const int16_t *src,
+                             tran_low_t *dst, int src_stride) {
+  if (rd_transform)
+    vpx_fdct32x32_rd(src, dst, src_stride);
+  else
+    vpx_fdct32x32(src, dst, src_stride);
+}
+
+static void fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+                         int diff_stride, TX_TYPE tx_type, int lossless) {
+  if (lossless) {
+    assert(tx_type == DCT_DCT);
+    vp10_fwht4x4(src_diff, coeff, diff_stride);
+    return;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+#if CONFIG_EXT_TX
+static void fwd_txfm_4x8(const int16_t *src_diff, tran_low_t *coeff,
+                         int diff_stride, TX_TYPE tx_type,
+                         FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht4x8(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
+                         int diff_stride, TX_TYPE tx_type,
+                         FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht8x4(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
+                          int diff_stride, TX_TYPE tx_type,
+                          FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht8x16(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
+                          int diff_stride, TX_TYPE tx_type,
+                          FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht16x8(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
+                           int diff_stride, TX_TYPE tx_type,
+                           FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht16x32(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
+                           int diff_stride, TX_TYPE tx_type,
+                           FWD_TXFM_OPT fwd_txfm_opt) {
+  (void)fwd_txfm_opt;
+  vp10_fht32x16(src_diff, coeff, diff_stride, tx_type);
+}
+#endif  // CONFIG_EXT_TX
+
+static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
+                         int diff_stride, TX_TYPE tx_type,
+                         FWD_TXFM_OPT fwd_txfm_opt) {
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+      if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
+        vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
+      else  // FWD_TXFM_OPT_DC
+        vpx_fdct8x8_1(src_diff, coeff, diff_stride);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST: vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
+    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+static void fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
+                           int diff_stride, TX_TYPE tx_type,
+                           FWD_TXFM_OPT fwd_txfm_opt) {
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+      if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
+        vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+      else  // FWD_TXFM_OPT_DC
+        vpx_fdct16x16_1(src_diff, coeff, diff_stride);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX:
+      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+static void fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
+                           tran_low_t *coeff, int diff_stride, TX_TYPE tx_type,
+                           FWD_TXFM_OPT fwd_txfm_opt) {
+  switch (tx_type) {
+    case DCT_DCT:
+      if (fwd_txfm_opt == FWD_TXFM_OPT_NORMAL)
+        fdct32x32(rd_transform, src_diff, coeff, diff_stride);
+      else  // FWD_TXFM_OPT_DC
+        vpx_fdct32x32_1(src_diff, coeff, diff_stride);
+      break;
+#if CONFIG_EXT_TX
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+      vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      vp10_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX:
+      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+                                int diff_stride, TX_TYPE tx_type, int lossless,
+                                const int bd) {
+  if (lossless) {
+    assert(tx_type == DCT_DCT);
+    vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+    return;
+  }
+
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+      vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+      vp10_fwd_txfm2d_4x4(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      vp10_highbd_fht4x4_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 4, tx_type); break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+#if CONFIG_EXT_TX
+static void highbd_fwd_txfm_4x8(const int16_t *src_diff, tran_low_t *coeff,
+                                int diff_stride, TX_TYPE tx_type,
+                                FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht4x8(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void highbd_fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
+                                int diff_stride, TX_TYPE tx_type,
+                                FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht8x4(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void highbd_fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
+                                 int diff_stride, TX_TYPE tx_type,
+                                 FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht8x16(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void highbd_fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
+                                 int diff_stride, TX_TYPE tx_type,
+                                 FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht16x8(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void highbd_fwd_txfm_16x32(const int16_t *src_diff, tran_low_t *coeff,
+                                  int diff_stride, TX_TYPE tx_type,
+                                  FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht16x32(src_diff, coeff, diff_stride, tx_type);
+}
+
+static void highbd_fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
+                                  int diff_stride, TX_TYPE tx_type,
+                                  FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  (void)bd;
+  vp10_highbd_fht32x16(src_diff, coeff, diff_stride, tx_type);
+}
+#endif  // CONFIG_EXT_TX
+
+static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
+                                int diff_stride, TX_TYPE tx_type,
+                                FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+      vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+      vp10_fwd_txfm2d_8x8(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      // Use C version since DST exists only in C
+      vp10_highbd_fht8x8_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX: vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 8, tx_type); break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
+                                  int diff_stride, TX_TYPE tx_type,
+                                  FWD_TXFM_OPT fwd_txfm_opt, const int bd) {
+  (void)fwd_txfm_opt;
+  switch (tx_type) {
+    case DCT_DCT:
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+      vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+      vp10_fwd_txfm2d_16x16(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      // Use C version since DST exists only in C
+      vp10_highbd_fht16x16_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX:
+      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 16, tx_type);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+static void highbd_fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
+                                  tran_low_t *coeff, int diff_stride,
+                                  TX_TYPE tx_type, FWD_TXFM_OPT fwd_txfm_opt,
+                                  const int bd) {
+  (void)rd_transform;
+  (void)fwd_txfm_opt;
+  switch (tx_type) {
+    case DCT_DCT:
+      vp10_fwd_txfm2d_32x32(src_diff, coeff, diff_stride, tx_type, bd);
+      break;
+#if CONFIG_EXT_TX
+    case ADST_DCT:
+    case DCT_ADST:
+    case ADST_ADST:
+    case FLIPADST_DCT:
+    case DCT_FLIPADST:
+    case FLIPADST_FLIPADST:
+    case ADST_FLIPADST:
+    case FLIPADST_ADST:
+    case V_DCT:
+    case H_DCT:
+    case V_ADST:
+    case H_ADST:
+    case V_FLIPADST:
+    case H_FLIPADST:
+      vp10_highbd_fht32x32_c(src_diff, coeff, diff_stride, tx_type);
+      break;
+    case IDTX:
+      vp10_fwd_idtx_c(src_diff, coeff, diff_stride, 32, tx_type);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
+              FWD_TXFM_PARAM *fwd_txfm_param) {
+  const int fwd_txfm_opt = fwd_txfm_param->fwd_txfm_opt;
+  const TX_TYPE tx_type = fwd_txfm_param->tx_type;
+  const TX_SIZE tx_size = fwd_txfm_param->tx_size;
+  const int rd_transform = fwd_txfm_param->rd_transform;
+  const int lossless = fwd_txfm_param->lossless;
+  switch (tx_size) {
+    case TX_32X32:
+      fwd_txfm_32x32(rd_transform, src_diff, coeff, diff_stride, tx_type,
+                     fwd_txfm_opt);
+      break;
+    case TX_16X16:
+      fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_8X8:
+      fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+#if CONFIG_EXT_TX
+    case TX_4X8:
+      fwd_txfm_4x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_8X4:
+      fwd_txfm_8x4(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_8X16:
+      fwd_txfm_8x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_16X8:
+      fwd_txfm_16x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_16X32:
+      fwd_txfm_16x32(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+    case TX_32X16:
+      fwd_txfm_32x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt);
+      break;
+#endif  // CONFIG_EXT_TX
+    case TX_4X4:
+      fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, lossless);
+      break;
+    default: assert(0); break;
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
+                     int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param) {
+  const int fwd_txfm_opt = fwd_txfm_param->fwd_txfm_opt;
+  const TX_TYPE tx_type = fwd_txfm_param->tx_type;
+  const TX_SIZE tx_size = fwd_txfm_param->tx_size;
+  const int rd_transform = fwd_txfm_param->rd_transform;
+  const int lossless = fwd_txfm_param->lossless;
+  const int bd = fwd_txfm_param->bd;
+  switch (tx_size) {
+    case TX_32X32:
+      highbd_fwd_txfm_32x32(rd_transform, src_diff, coeff, diff_stride, tx_type,
+                            fwd_txfm_opt, bd);
+      break;
+    case TX_16X16:
+      highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                            bd);
+      break;
+    case TX_8X8:
+      highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                          bd);
+      break;
+#if CONFIG_EXT_TX
+    case TX_4X8:
+      highbd_fwd_txfm_4x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                          bd);
+      break;
+    case TX_8X4:
+      highbd_fwd_txfm_8x4(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                          bd);
+      break;
+    case TX_8X16:
+      highbd_fwd_txfm_8x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                           bd);
+      break;
+    case TX_16X8:
+      highbd_fwd_txfm_16x8(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                           bd);
+      break;
+    case TX_16X32:
+      highbd_fwd_txfm_16x32(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                            bd);
+      break;
+    case TX_32X16:
+      highbd_fwd_txfm_32x16(src_diff, coeff, diff_stride, tx_type, fwd_txfm_opt,
+                            bd);
+      break;
+#endif  // CONFIG_EXT_TX
+    case TX_4X4:
+      highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, lossless, bd);
+      break;
+    default: assert(0); break;
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
diff --git a/av1/encoder/hybrid_fwd_txfm.h b/av1/encoder/hybrid_fwd_txfm.h
new file mode 100644
index 0000000..07b832c
--- /dev/null
+++ b/av1/encoder/hybrid_fwd_txfm.h
@@ -0,0 +1,45 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_HYBRID_FWD_TXFM_H_
+#define VP10_ENCODER_HYBRID_FWD_TXFM_H_
+
+#include "./vpx_config.h"
+
+typedef enum FWD_TXFM_OPT { FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC } FWD_TXFM_OPT;
+
+typedef struct FWD_TXFM_PARAM {
+  TX_TYPE tx_type;
+  TX_SIZE tx_size;
+  FWD_TXFM_OPT fwd_txfm_opt;
+  int rd_transform;
+  int lossless;
+#if CONFIG_VP9_HIGHBITDEPTH
+  int bd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+} FWD_TXFM_PARAM;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
+              FWD_TXFM_PARAM *fwd_txfm_param);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff,
+                     int diff_stride, FWD_TXFM_PARAM *fwd_txfm_param);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_HYBRID_FWD_TXFM_H_
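
A minimal usage sketch of the dispatch above: fill a FWD_TXFM_PARAM and run an 8x8 DCT_DCT forward transform on a residual block. The buffer shapes and stride are illustrative assumptions.

```c
static void fwd_txfm_example(const int16_t src_diff[8 * 8],
                             tran_low_t coeff[8 * 8]) {
  FWD_TXFM_PARAM param;
  param.tx_type = DCT_DCT;
  param.tx_size = TX_8X8;
  // FWD_TXFM_OPT_DC computes only the DC coefficient (a cheap estimate);
  // FWD_TXFM_OPT_NORMAL runs the full transform.
  param.fwd_txfm_opt = FWD_TXFM_OPT_NORMAL;
  param.rd_transform = 0;  // only consulted on the 32x32 DCT path
  param.lossless = 0;      // only consulted on the 4x4 path
#if CONFIG_VP9_HIGHBITDEPTH
  param.bd = 8;
#endif
  fwd_txfm(src_diff, coeff, 8 /* diff_stride */, &param);
}
```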
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
new file mode 100644
index 0000000..3c4ff7d
--- /dev/null
+++ b/av1/encoder/lookahead.c
@@ -0,0 +1,224 @@
+/*
+ *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+#include "./vpx_config.h"
+
+#include "av1/common/common.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/extend.h"
+#include "av1/encoder/lookahead.h"
+
+/* Return the buffer at the given absolute index and increment the index */
+static struct lookahead_entry *pop(struct lookahead_ctx *ctx, int *idx) {
+  int index = *idx;
+  struct lookahead_entry *buf = ctx->buf + index;
+
+  assert(index < ctx->max_sz);
+  if (++index >= ctx->max_sz) index -= ctx->max_sz;
+  *idx = index;
+  return buf;
+}
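
The conditional subtraction in pop() is a branch-based modulo: because the index advances by exactly one slot, at most one wrap is ever needed, so the division of a true % can be avoided. A tiny sketch with an assumed capacity of 5:

```c
#include <assert.h>

int main(void) {
  const int max_sz = 5;  // assumed queue capacity for illustration
  int idx = 4;           // the last valid slot

  // One-step advance with wrap, exactly as pop() does it; equivalent to
  // idx = (idx + 1) % max_sz, but without the division.
  if (++idx >= max_sz) idx -= max_sz;
  assert(idx == 0);
  return 0;
}
```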
+
+void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
+  if (ctx) {
+    if (ctx->buf) {
+      int i;
+
+      for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
+      free(ctx->buf);
+    }
+    free(ctx);
+  }
+}
+
+struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+                                          unsigned int height,
+                                          unsigned int subsampling_x,
+                                          unsigned int subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                          int use_highbitdepth,
+#endif
+                                          unsigned int depth) {
+  struct lookahead_ctx *ctx = NULL;
+
+  // Clamp the lookahead queue depth
+  depth = clamp(depth, 1, MAX_LAG_BUFFERS);
+
+  // Allocate memory to keep previous source frames available.
+  depth += MAX_PRE_FRAMES;
+
+  // Allocate the lookahead structures
+  ctx = calloc(1, sizeof(*ctx));
+  if (ctx) {
+    const int legacy_byte_alignment = 0;
+    unsigned int i;
+    ctx->max_sz = depth;
+    ctx->buf = calloc(depth, sizeof(*ctx->buf));
+    if (!ctx->buf) goto bail;
+    for (i = 0; i < depth; i++)
+      if (vpx_alloc_frame_buffer(
+              &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+              use_highbitdepth,
+#endif
+              VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
+        goto bail;
+  }
+  return ctx;
+bail:
+  vp10_lookahead_destroy(ctx);
+  return NULL;
+}
+
+#define USE_PARTIAL_COPY 0
+
+int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+                        int64_t ts_start, int64_t ts_end,
+#if CONFIG_VP9_HIGHBITDEPTH
+                        int use_highbitdepth,
+#endif
+                        unsigned int flags) {
+  struct lookahead_entry *buf;
+#if USE_PARTIAL_COPY
+  int row, col, active_end;
+  int mb_rows = (src->y_height + 15) >> 4;
+  int mb_cols = (src->y_width + 15) >> 4;
+#endif
+  int width = src->y_crop_width;
+  int height = src->y_crop_height;
+  int uv_width = src->uv_crop_width;
+  int uv_height = src->uv_crop_height;
+  int subsampling_x = src->subsampling_x;
+  int subsampling_y = src->subsampling_y;
+  int larger_dimensions, new_dimensions;
+
+  if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) return 1;
+  ctx->sz++;
+  buf = pop(ctx, &ctx->write_idx);
+
+  new_dimensions = width != buf->img.y_crop_width ||
+                   height != buf->img.y_crop_height ||
+                   uv_width != buf->img.uv_crop_width ||
+                   uv_height != buf->img.uv_crop_height;
+  larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
+                      uv_width > buf->img.uv_width ||
+                      uv_height > buf->img.uv_height;
+  assert(!larger_dimensions || new_dimensions);
+
+#if USE_PARTIAL_COPY
+  // TODO(jkoleszar): This is disabled for now, as
+  // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+
+  // Only do this partial copy if the following conditions are all met:
+  // 1. The lookahead queue has a size of 1.
+  // 2. An active map is provided.
+  // 3. This is not a key frame, golden frame, nor altref frame.
+  if (!new_dimensions && ctx->max_sz == 1 && active_map && !flags) {
+    for (row = 0; row < mb_rows; ++row) {
+      col = 0;
+
+      while (1) {
+        // Find the first active macroblock in this row.
+        for (; col < mb_cols; ++col) {
+          if (active_map[col]) break;
+        }
+
+        // No more active macroblocks in this row.
+        if (col == mb_cols) break;
+
+        // Find the end of active region in this row.
+        active_end = col;
+
+        for (; active_end < mb_cols; ++active_end) {
+          if (!active_map[active_end]) break;
+        }
+
+        // Only copy this active region.
+        vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+                                             16, (active_end - col) << 4);
+
+        // Start again from the end of this active region.
+        col = active_end;
+      }
+
+      active_map += mb_cols;
+    }
+  } else {
+#endif
+    if (larger_dimensions) {
+      YV12_BUFFER_CONFIG new_img;
+      memset(&new_img, 0, sizeof(new_img));
+      if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+                                 subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                 use_highbitdepth,
+#endif
+                                 VPX_ENC_BORDER_IN_PIXELS, 0))
+        return 1;
+      vpx_free_frame_buffer(&buf->img);
+      buf->img = new_img;
+    } else if (new_dimensions) {
+      buf->img.y_crop_width = src->y_crop_width;
+      buf->img.y_crop_height = src->y_crop_height;
+      buf->img.uv_crop_width = src->uv_crop_width;
+      buf->img.uv_crop_height = src->uv_crop_height;
+      buf->img.subsampling_x = src->subsampling_x;
+      buf->img.subsampling_y = src->subsampling_y;
+    }
+    // Partial copy not implemented yet
+    vp10_copy_and_extend_frame(src, &buf->img);
+#if USE_PARTIAL_COPY
+  }
+#endif
+
+  buf->ts_start = ts_start;
+  buf->ts_end = ts_end;
+  buf->flags = flags;
+  return 0;
+}
+
+struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+                                           int drain) {
+  struct lookahead_entry *buf = NULL;
+
+  if (ctx && ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
+    buf = pop(ctx, &ctx->read_idx);
+    ctx->sz--;
+  }
+  return buf;
+}
+
+struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+                                            int index) {
+  struct lookahead_entry *buf = NULL;
+
+  if (index >= 0) {
+    // Forward peek
+    if (index < ctx->sz) {
+      index += ctx->read_idx;
+      if (index >= ctx->max_sz) index -= ctx->max_sz;
+      buf = ctx->buf + index;
+    }
+  } else if (index < 0) {
+    // Backward peek
+    if (-index <= MAX_PRE_FRAMES) {
+      index += ctx->read_idx;
+      if (index < 0) index += ctx->max_sz;
+      buf = ctx->buf + index;
+    }
+  }
+
+  return buf;
+}
+
+unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
new file mode 100644
index 0000000..4b26068
--- /dev/null
+++ b/av1/encoder/lookahead.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_LOOKAHEAD_H_
+#define VP10_ENCODER_LOOKAHEAD_H_
+
+#include "aom_scale/yv12config.h"
+#include "aom/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_LAG_BUFFERS 25
+
+struct lookahead_entry {
+  YV12_BUFFER_CONFIG img;
+  int64_t ts_start;
+  int64_t ts_end;
+  unsigned int flags;
+};
+
+// The maximum number of past frames to keep in the queue.
+#define MAX_PRE_FRAMES 1
+
+struct lookahead_ctx {
+  int max_sz;                  /* Absolute size of the queue */
+  int sz;                      /* Number of buffers currently in the queue */
+  int read_idx;                /* Read index */
+  int write_idx;               /* Write index */
+  struct lookahead_entry *buf; /* Buffer list */
+};
+
+/**\brief Initializes the lookahead stage
+ *
+ * The lookahead stage is a queue of frame buffers on which some analysis
+ * may be done when buffers are enqueued.
+ */
+struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+                                          unsigned int height,
+                                          unsigned int subsampling_x,
+                                          unsigned int subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+                                          int use_highbitdepth,
+#endif
+                                          unsigned int depth);
+
+/**\brief Destroys the lookahead stage
+ */
+void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
+
+/**\brief Enqueue a source buffer
+ *
+ * This function will copy the source image into a new framebuffer with
+ * the expected stride/border.
+ *
+ * \param[in] ctx         Pointer to the lookahead context
+ * \param[in] src         Pointer to the image to enqueue
+ * \param[in] ts_start    Timestamp for the start of this frame
+ * \param[in] ts_end      Timestamp for the end of this frame
+ * \param[in] flags       Flags set on this frame
+ */
+int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+                        int64_t ts_start, int64_t ts_end,
+#if CONFIG_VP9_HIGHBITDEPTH
+                        int use_highbitdepth,
+#endif
+                        unsigned int flags);
+
+/**\brief Get the next source buffer to encode
+ *
+ *
+ * \param[in] ctx       Pointer to the lookahead context
+ * \param[in] drain     Flag indicating the buffer should be drained
+ *                      (return a buffer regardless of the current queue depth)
+ *
+ * \retval NULL, if drain set and queue is empty
+ * \retval NULL, if drain not set and queue not of the configured depth
+ */
+struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+                                           int drain);
+
+/**\brief Get a future source buffer to encode
+ *
+ * \param[in] ctx       Pointer to the lookahead context
+ * \param[in] index     Index of the frame to be returned, 0 == next frame
+ *
+ * \retval NULL, if no buffer exists at the specified index
+ */
+struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+                                            int index);
+
+/**\brief Get the number of frames currently in the lookahead queue
+ *
+ * \param[in] ctx       Pointer to the lookahead context
+ */
+unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_LOOKAHEAD_H_
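
An end-to-end usage sketch of this API: initialize the queue, push a source frame, peek, then pop. It assumes a build without CONFIG_VP9_HIGHBITDEPTH (so the signatures take no use_highbitdepth argument) and a caller-provided, already-filled source frame; error handling is reduced to early returns.

```c
static void lookahead_example(YV12_BUFFER_CONFIG *src, int64_t pts) {
  struct lookahead_ctx *la = vp10_lookahead_init(
      1280, 720, 0 /* subsampling_x */, 0 /* subsampling_y */, 16 /* depth */);
  if (!la) return;

  // Push returns 0 on success, 1 when the queue (minus the MAX_PRE_FRAMES
  // slots reserved for past frames) is full.
  if (!vp10_lookahead_push(la, src, pts, pts + 1, 0 /* flags */)) {
    // Peek at the next frame without dequeuing it...
    struct lookahead_entry *next = vp10_lookahead_peek(la, 0);
    (void)next;
    // ...then pop with drain = 1 to force a frame out even though the
    // queue has not yet filled to its configured depth.
    struct lookahead_entry *buf = vp10_lookahead_pop(la, 1 /* drain */);
    (void)buf;
  }

  vp10_lookahead_destroy(la);
}
```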
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
new file mode 100644
index 0000000..43f0f87
--- /dev/null
+++ b/av1/encoder/mbgraph.c
@@ -0,0 +1,404 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/system_state.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/common/blockd.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+
+static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+                                              int mb_row, int mb_col) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
+  const vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
+
+  const int tmp_col_min = x->mv_col_min;
+  const int tmp_col_max = x->mv_col_max;
+  const int tmp_row_min = x->mv_row_min;
+  const int tmp_row_max = x->mv_row_max;
+  MV ref_full;
+  int cost_list[5];
+
+  // Further step/diamond searches as necessary
+  int step_param = mv_sf->reduce_first_step_size;
+  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+
+  vp10_set_mv_search_range(x, ref_mv);
+
+  ref_full.col = ref_mv->col >> 3;
+  ref_full.row = ref_mv->row >> 3;
+
+  /*cpi->sf.search_method == HEX*/
+  vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
+                  cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
+
+  // Try sub-pixel MC
+  // if (bestsme > error_thresh && bestsme < INT_MAX)
+  {
+    int distortion;
+    unsigned int sse;
+    cpi->find_fractional_mv_step(x, ref_mv, cpi->common.allow_high_precision_mv,
+                                 x->errorperbit, &v_fn_ptr, 0,
+                                 mv_sf->subpel_iters_per_step,
+                                 cond_cost_list(cpi, cost_list), NULL, NULL,
+                                 &distortion, &sse, NULL, 0, 0, 0);
+  }
+
+#if CONFIG_EXT_INTER
+  if (has_second_ref(&xd->mi[0]->mbmi))
+    xd->mi[0]->mbmi.mode = NEW_NEWMV;
+  else
+#endif  // CONFIG_EXT_INTER
+    xd->mi[0]->mbmi.mode = NEWMV;
+
+  xd->mi[0]->mbmi.mv[0] = x->best_mv;
+#if CONFIG_EXT_INTER
+  xd->mi[0]->mbmi.ref_frame[1] = NONE;
+#endif  // CONFIG_EXT_INTER
+
+  vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+
+  /* restore UMV window */
+  x->mv_col_min = tmp_col_min;
+  x->mv_col_max = tmp_col_max;
+  x->mv_row_min = tmp_row_min;
+  x->mv_row_max = tmp_row_max;
+
+  return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+                      xd->plane[0].dst.buf, xd->plane[0].dst.stride);
+}
+
+static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv, int mb_row,
+                                  int mb_col) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  unsigned int err, tmp_err;
+  MV best_mv;
+
+  // Try zero MV first
+  // FIXME should really use something like near/nearest MV and/or MV prediction
+  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
+  best_mv.col = best_mv.row = 0;
+
+  // Test last reference frame using the previous best mv as the
+  // starting point (best reference) for the search
+  tmp_err = do_16x16_motion_iteration(cpi, ref_mv, mb_row, mb_col);
+  if (tmp_err < err) {
+    err = tmp_err;
+    best_mv = x->best_mv.as_mv;
+  }
+
+  // If the current best reference mv is not centered on 0,0 then do a 0,0
+  // based search as well.
+  if (ref_mv->row != 0 || ref_mv->col != 0) {
+    unsigned int tmp_err;
+    MV zero_ref_mv = { 0, 0 };
+
+    tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, mb_row, mb_col);
+    if (tmp_err < err) {
+      err = tmp_err;
+      best_mv = x->best_mv.as_mv;
+    }
+  }
+
+  x->best_mv.as_mv = best_mv;
+  return err;
+}
+
+static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  unsigned int err;
+
+  // Try zero MV first
+  // FIXME should really use something like near/nearest MV and/or MV prediction
+  err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+                     xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
+
+  dst_mv->as_int = 0;
+
+  return err;
+}
+
+static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  PREDICTION_MODE best_mode = -1, mode;
+  unsigned int best_err = INT_MAX;
+
+  // Calculate SAD for each intra prediction mode;
+  // we're intentionally not doing 4x4, we just want a rough estimate.
+  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
+    unsigned int err;
+
+    xd->mi[0]->mbmi.mode = mode;
+    vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+                             x->plane[0].src.stride, xd->plane[0].dst.buf,
+                             xd->plane[0].dst.stride, 0, 0, 0);
+    err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
+                       xd->plane[0].dst.buf, xd->plane[0].dst.stride);
+
+    // find best
+    if (err < best_err) {
+      best_err = err;
+      best_mode = mode;
+    }
+  }
+
+  if (pbest_mode) *pbest_mode = best_mode;
+
+  return best_err;
+}
+
+static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+                                    YV12_BUFFER_CONFIG *buf, int mb_y_offset,
+                                    YV12_BUFFER_CONFIG *golden_ref,
+                                    const MV *prev_golden_ref_mv,
+                                    YV12_BUFFER_CONFIG *alt_ref, int mb_row,
+                                    int mb_col) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int intra_error;
+  VP10_COMMON *cm = &cpi->common;
+
+  // FIXME in practice we're completely ignoring chroma here
+  x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
+  x->plane[0].src.stride = buf->y_stride;
+
+  xd->plane[0].dst.buf = get_frame_new_buffer(cm)->y_buffer + mb_y_offset;
+  xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;
+
+  // do intra 16x16 prediction
+  intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
+  if (intra_error <= 0) intra_error = 1;
+  stats->ref[INTRA_FRAME].err = intra_error;
+
+  // Golden frame MV search, if it exists and is different from the last frame.
+  if (golden_ref) {
+    int g_motion_error;
+    xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
+    xd->plane[0].pre[0].stride = golden_ref->y_stride;
+    g_motion_error =
+        do_16x16_motion_search(cpi, prev_golden_ref_mv, mb_row, mb_col);
+    stats->ref[GOLDEN_FRAME].m.mv = x->best_mv;
+    stats->ref[GOLDEN_FRAME].err = g_motion_error;
+  } else {
+    stats->ref[GOLDEN_FRAME].err = INT_MAX;
+    stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
+  }
+
+  // Do an Alt-ref frame MV search, if it exists and is different from the
+  // last/golden frame.
+  if (alt_ref) {
+    int a_motion_error;
+    xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
+    xd->plane[0].pre[0].stride = alt_ref->y_stride;
+    a_motion_error =
+        do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv);
+
+    stats->ref[ALTREF_FRAME].err = a_motion_error;
+  } else {
+    stats->ref[ALTREF_FRAME].err = INT_MAX;
+    stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
+  }
+}
+
+static void update_mbgraph_frame_stats(VP10_COMP *cpi,
+                                       MBGRAPH_FRAME_STATS *stats,
+                                       YV12_BUFFER_CONFIG *buf,
+                                       YV12_BUFFER_CONFIG *golden_ref,
+                                       YV12_BUFFER_CONFIG *alt_ref) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  VP10_COMMON *const cm = &cpi->common;
+
+  int mb_col, mb_row, offset = 0;
+  int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
+  MV gld_top_mv = { 0, 0 };
+  MODE_INFO mi_local;
+
+  vp10_zero(mi_local);
+  // Set up limit values for motion vectors to prevent them from extending
+  // outside the UMV borders.
+  x->mv_row_min = -BORDER_MV_PIXELS_B16;
+  x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
+  xd->up_available = 0;
+  xd->plane[0].dst.stride = buf->y_stride;
+  xd->plane[0].pre[0].stride = buf->y_stride;
+  xd->plane[1].dst.stride = buf->uv_stride;
+  xd->mi[0] = &mi_local;
+  mi_local.mbmi.sb_type = BLOCK_16X16;
+  mi_local.mbmi.ref_frame[0] = LAST_FRAME;
+  mi_local.mbmi.ref_frame[1] = NONE;
+
+  for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
+    MV gld_left_mv = gld_top_mv;
+    int mb_y_in_offset = mb_y_offset;
+    int arf_y_in_offset = arf_y_offset;
+    int gld_y_in_offset = gld_y_offset;
+
+    // Set up limit values for motion vectors to prevent them from extending
+    // outside the UMV borders.
+    x->mv_col_min = -BORDER_MV_PIXELS_B16;
+    x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
+    xd->left_available = 0;
+
+    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+      MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
+
+      update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref,
+                              &gld_left_mv, alt_ref, mb_row, mb_col);
+      gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
+      if (mb_col == 0) {
+        gld_top_mv = gld_left_mv;
+      }
+      xd->left_available = 1;
+      mb_y_in_offset += 16;
+      gld_y_in_offset += 16;
+      arf_y_in_offset += 16;
+      x->mv_col_min -= 16;
+      x->mv_col_max -= 16;
+    }
+    xd->up_available = 1;
+    mb_y_offset += buf->y_stride * 16;
+    gld_y_offset += golden_ref->y_stride * 16;
+    if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
+    x->mv_row_min -= 16;
+    x->mv_row_max -= 16;
+    offset += cm->mb_cols;
+  }
+}
+
+static void separate_arf_mbs(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int mb_col, mb_row, offset, i;
+  int mi_row, mi_col;
+  int ncnt[4] = { 0 };
+  int n_frames = cpi->mbgraph_n_frames;
+
+  int *arf_not_zz;
+
+  CHECK_MEM_ERROR(
+      cm, arf_not_zz,
+      vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
+
+  // We are not interested in results beyond the alt ref itself.
+  if (n_frames > cpi->rc.frames_till_gf_update_due)
+    n_frames = cpi->rc.frames_till_gf_update_due;
+
+  // defer cost to reference frames
+  for (i = n_frames - 1; i >= 0; i--) {
+    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+
+    for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
+         offset += cm->mb_cols, mb_row++) {
+      for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
+        MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
+
+        int altref_err = mb_stats->ref[ALTREF_FRAME].err;
+        int intra_err = mb_stats->ref[INTRA_FRAME].err;
+        int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
+
+        // The alt-ref error was measured with a (0,0) MV only; flag the
+        // block if that error is large outright or worse than the intra or
+        // golden-frame error.
+        if (altref_err > 1000 || altref_err > intra_err ||
+            altref_err > golden_err) {
+          arf_not_zz[offset + mb_col]++;
+        }
+      }
+    }
+  }
+
+  // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid
+  // out-of-bounds accesses in segmentation_map.
+  for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) {
+    for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
+      // If any of the blocks in the sequence failed then the MB
+      // goes in segment 0
+      if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) {
+        ncnt[0]++;
+        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0;
+      } else {
+        cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1;
+        ncnt[1]++;
+      }
+    }
+  }
+
+  // Only bother with segmentation if over 10% of the MBs are in the static
+  // segment.
+  // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
+  if (1) {
+    // Record the percentage of blocks that are marked as static.
+    if (cm->MBs) {
+      cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols);
+    } else {
+      // This error case should not be reachable, as this function should
+      // never be called with the common data structure uninitialized.
+      cpi->static_mb_pct = 0;
+    }
+
+    vp10_enable_segmentation(&cm->seg);
+  } else {
+    cpi->static_mb_pct = 0;
+    vp10_disable_segmentation(&cm->seg);
+  }
+
+  // Free locally allocated storage.
+  vpx_free(arf_not_zz);
+}
+
+void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  int i, n_frames = vp10_lookahead_depth(cpi->lookahead);
+  YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
+
+  assert(golden_ref != NULL);
+
+  // We need to look ahead beyond the point where the ARF transitions into
+  // being a GF, so exit if the lookahead does not extend beyond that.
+  if (n_frames <= cpi->rc.frames_till_gf_update_due) return;
+
+  if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS;
+
+  cpi->mbgraph_n_frames = n_frames;
+  for (i = 0; i < n_frames; i++) {
+    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+    memset(frame_stats->mb_stats, 0,
+           cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
+  }
+
+  // Do a motion search to find each reference frame's contribution to data
+  // later on in this GF group.
+  // FIXME really, the GF/last MC search should be done forward, and
+  // the ARF MC search backwards, to get optimal results for MV caching
+  for (i = 0; i < n_frames; i++) {
+    MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
+    struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i);
+
+    assert(q_cur != NULL);
+
+    update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref,
+                               cpi->Source);
+  }
+
+  vpx_clear_system_state();
+
+  separate_arf_mbs(cpi);
+}
diff --git a/av1/encoder/mbgraph.h b/av1/encoder/mbgraph.h
new file mode 100644
index 0000000..0b056af
--- /dev/null
+++ b/av1/encoder/mbgraph.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_MBGRAPH_H_
+#define VP10_ENCODER_MBGRAPH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+  struct {
+    int err;
+    union {
+      int_mv mv;
+      PREDICTION_MODE mode;
+    } m;
+  } ref[TOTAL_REFS_PER_FRAME];
+} MBGRAPH_MB_STATS;
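+// (One entry per 16x16 macroblock: for each reference type it keeps the
+// prediction error plus either the best inter MV or the best intra mode,
+// which is why mv and mode can share a union.)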
+
+typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
+
+struct VP10_COMP;
+
+void vp10_update_mbgraph_stats(struct VP10_COMP *cpi);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_MBGRAPH_H_
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
new file mode 100644
index 0000000..9ee06e9
--- /dev/null
+++ b/av1/encoder/mcomp.c
@@ -0,0 +1,3417 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/common.h"
+#include "av1/common/reconinter.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/rdopt.h"
+
+// #define NEW_DIAMOND_SEARCH
+
+static INLINE const uint8_t *get_buf_from_mv(const struct buf_2d *buf,
+                                             const MV *mv) {
+  return &buf->buf[mv->row * buf->stride + mv->col];
+}
+
+void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
+  int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
+  int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
+  int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
+  int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL;
+
+  col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1);
+  row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1);
+  col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1);
+  row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1);
+
+  // Get intersection of UMV window and valid MV window to reduce # of checks
+  // in diamond search.
+  if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+  if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+  if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+  if (x->mv_row_max > row_max) x->mv_row_max = row_max;
+}
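+// Worked example (illustrative, assuming MAX_FULL_PEL_VAL == 1023 as defined
+// in mcomp.h): for a predicted MV of (row, col) = (21, -13) in 1/8-pel units,
+//   col_min = (-13 >> 3) - 1023 + 1 = -1024
+//   col_max = (-13 >> 3) + 1023     =  1021
+//   row_min = ( 21 >> 3) - 1023 + 1 = -1020
+//   row_max = ( 21 >> 3) + 1023     =  1025
+// where the "+ 1" terms compensate for the truncation of the fractional
+// components, before the MV_LOW/MV_UPP clamp and the intersection with the
+// existing UMV window.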
+
+int vp10_init_search_range(int size) {
+  int sr = 0;
+  // Minimum search size no matter what the passed-in value is.
+  size = VPXMAX(16, size);
+
+  while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
+
+  sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
+  return sr;
+}
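+// Worked example (illustrative, assuming MAX_FULL_PEL_VAL == 1023 and
+// MAX_MVSEARCH_STEPS == 11 as defined in mcomp.h): for size == 64 the loop
+// condition 64 << sr < 1023 holds for sr = 0..3 and fails at sr == 4
+// (64 << 4 == 1024), so the loop exits with sr == 4, well under the
+// MAX_MVSEARCH_STEPS - 2 cap.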
+
+static INLINE int mv_cost(const MV *mv, const int *joint_cost,
+                          int *const comp_cost[2]) {
+  return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+         comp_cost[1][mv->col];
+}
+
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                     int *mvcost[2], int weight) {
+  const MV diff = { mv->row - ref->row, mv->col - ref->col };
+  return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
+}
+
+#define PIXEL_TRANSFORM_ERROR_SCALE 4
+static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                       int *mvcost[2], int error_per_bit) {
+  if (mvcost) {
+    const MV diff = { mv->row - ref->row, mv->col - ref->col };
+    // This product sits at a 32-bit ceiling right now and any additional
+    // accuracy in either bit cost or error cost will cause it to overflow.
+    return ROUND_POWER_OF_TWO(
+        (unsigned)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
+        RDDIV_BITS + VP10_PROB_COST_SHIFT - RD_EPB_SHIFT +
+            PIXEL_TRANSFORM_ERROR_SCALE);
+  }
+  return 0;
+}
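+// Note (informative; assumes the constants of this port, to our reading
+// RDDIV_BITS == 7, VP10_PROB_COST_SHIFT == 9 and RD_EPB_SHIFT == 6): the
+// rounding shift above works out to 7 + 9 - 6 + 4 = 14 bits, and the cast to
+// unsigned avoids signed-overflow undefined behavior when the product
+// approaches the 32-bit ceiling noted in the comment.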
+
+static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
+                          int sad_per_bit) {
+  const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
+  return ROUND_POWER_OF_TWO(
+      (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) * sad_per_bit,
+      VP10_PROB_COST_SHIFT);
+}
+
+void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
+  int len, ss_count = 1;
+
+  cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
+  cfg->ss[0].offset = 0;
+
+  for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+    // Generate offsets for 4 search sites per step.
+    const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };
+    int i;
+    for (i = 0; i < 4; ++i) {
+      search_site *const ss = &cfg->ss[ss_count++];
+      ss->mv = ss_mvs[i];
+      ss->offset = ss->mv.row * stride + ss->mv.col;
+    }
+  }
+
+  cfg->ss_count = ss_count;
+  cfg->searches_per_step = 4;
+}
+
+void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
+  int len, ss_count = 1;
+
+  cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
+  cfg->ss[0].offset = 0;
+
+  for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
+    // Generate offsets for 8 search sites per step.
+    const MV ss_mvs[8] = { { -len, 0 },   { len, 0 },     { 0, -len },
+                           { 0, len },    { -len, -len }, { -len, len },
+                           { len, -len }, { len, len } };
+    int i;
+    for (i = 0; i < 8; ++i) {
+      search_site *const ss = &cfg->ss[ss_count++];
+      ss->mv = ss_mvs[i];
+      ss->offset = ss->mv.row * stride + ss->mv.col;
+    }
+  }
+
+  cfg->ss_count = ss_count;
+  cfg->searches_per_step = 8;
+}
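+// Sizing note (illustrative): the len loop iterates MAX_MVSEARCH_STEPS times
+// (len == MAX_FIRST_STEP == 1 << (MAX_MVSEARCH_STEPS - 1) down to 1), so the
+// two configs above fill 1 + 4 * MAX_MVSEARCH_STEPS and
+// 1 + 8 * MAX_MVSEARCH_STEPS sites respectively (45 and 89 for
+// MAX_MVSEARCH_STEPS == 11), entry 0 being the zero vector.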
+
+/*
+ * To avoid the penalty of cache-line-crossing reads, preload the reference
+ * area into a small buffer whose alignment guarantees that reads from it
+ * never cross a cache line. This reduces the CPU cycles spent reading ref
+ * data in the sub-pixel filter functions.
+ * TODO: Currently, since the sub-pixel search range here is -3 ~ 3, a copied
+ * area of 22 rows x 32 cols is enough for a 16x16 macroblock. Later, for
+ * SPLITMV, we could reduce the area.
+ */
+
+// convert motion vector component to offset for sv[a]f calc
+static INLINE int sp(int x) { return x & 7; }
+
+static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
+  return &buf[(r >> 3) * stride + (c >> 3)];
+}
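+// Example (illustrative): for a 1/8-pel position (r, c) = (21, -13), pre()
+// addresses the full-pel sample at (21 >> 3, -13 >> 3) == (2, -2), since the
+// arithmetic shift floors negative components, while sp() extracts the 3-bit
+// subpel phase (21 & 7, -13 & 7) == (5, 3) that is handed to the svf/svaf
+// sub-pixel variance functions.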
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c)                                             \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                 \
+    MV this_mv = { r, c };                                                \
+    v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);    \
+    if (second_pred == NULL)                                              \
+      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r),  \
+                         src_address, src_stride, &sse);                  \
+    else                                                                  \
+      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+                          src_address, src_stride, &sse, second_pred);    \
+    v += thismse;                                                         \
+    if (v < besterr) {                                                    \
+      besterr = v;                                                        \
+      br = r;                                                             \
+      bc = c;                                                             \
+      *distortion = thismse;                                              \
+      *sse1 = sse;                                                        \
+    }                                                                     \
+  } else {                                                                \
+    v = INT_MAX;                                                          \
+  }
+
+#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
+
+static INLINE const uint8_t *upre(const uint8_t *buf, int stride, int r,
+                                  int c) {
+  return &buf[(r)*stride + (c)];
+}
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER1(v, r, c)                                         \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {              \
+    MV this_mv = { r, c };                                             \
+    thismse = upsampled_pref_error(xd, vfp, src_address, src_stride,   \
+                                   upre(y, y_stride, r, c), y_stride,  \
+                                   second_pred, w, h, &sse);           \
+    v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+    v += thismse;                                                      \
+    if (v < besterr) {                                                 \
+      besterr = v;                                                     \
+      br = r;                                                          \
+      bc = c;                                                          \
+      *distortion = thismse;                                           \
+      *sse1 = sse;                                                     \
+    }                                                                  \
+  } else {                                                             \
+    v = INT_MAX;                                                       \
+  }
+
+#define FIRST_LEVEL_CHECKS                                       \
+  {                                                              \
+    unsigned int left, right, up, down, diag;                    \
+    CHECK_BETTER(left, tr, tc - hstep);                          \
+    CHECK_BETTER(right, tr, tc + hstep);                         \
+    CHECK_BETTER(up, tr - hstep, tc);                            \
+    CHECK_BETTER(down, tr + hstep, tc);                          \
+    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);     \
+    switch (whichdir) {                                          \
+      case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \
+      case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \
+      case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
+      case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
+    }                                                            \
+  }
+
+#define SECOND_LEVEL_CHECKS                                       \
+  {                                                               \
+    int kr, kc;                                                   \
+    unsigned int second;                                          \
+    if (tr != br && tc != bc) {                                   \
+      kr = br - tr;                                               \
+      kc = bc - tc;                                               \
+      CHECK_BETTER(second, tr + kr, tc + 2 * kc);                 \
+      CHECK_BETTER(second, tr + 2 * kr, tc + kc);                 \
+    } else if (tr == br && tc != bc) {                            \
+      kc = bc - tc;                                               \
+      CHECK_BETTER(second, tr + hstep, tc + 2 * kc);              \
+      CHECK_BETTER(second, tr - hstep, tc + 2 * kc);              \
+      switch (whichdir) {                                         \
+        case 0:                                                   \
+        case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \
+        case 2:                                                   \
+        case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \
+      }                                                           \
+    } else if (tr != br && tc == bc) {                            \
+      kr = br - tr;                                               \
+      CHECK_BETTER(second, tr + 2 * kr, tc + hstep);              \
+      CHECK_BETTER(second, tr + 2 * kr, tc - hstep);              \
+      switch (whichdir) {                                         \
+        case 0:                                                   \
+        case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \
+        case 1:                                                   \
+        case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
+      }                                                           \
+    }                                                             \
+  }
+
+// TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST is a rewrite of
+// SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten in the
+// same way later.
+#define SECOND_LEVEL_CHECKS_BEST(k)                \
+  {                                                \
+    unsigned int second;                           \
+    int br0 = br;                                  \
+    int bc0 = bc;                                  \
+    assert(tr == br || tc == bc);                  \
+    if (tr == br && tc != bc) {                    \
+      kc = bc - tc;                                \
+    } else if (tr != br && tc == bc) {             \
+      kr = br - tr;                                \
+    }                                              \
+    CHECK_BETTER##k(second, br0 + kr, bc0);        \
+    CHECK_BETTER##k(second, br0, bc0 + kc);        \
+    if (br0 != br || bc0 != bc) {                  \
+      CHECK_BETTER##k(second, br0 + kr, bc0 + kc); \
+    }                                              \
+  }
+
+#define SETUP_SUBPEL_SEARCH                                         \
+  const uint8_t *const src_address = x->plane[0].src.buf;           \
+  const int src_stride = x->plane[0].src.stride;                    \
+  const MACROBLOCKD *xd = &x->e_mbd;                                \
+  unsigned int besterr = INT_MAX;                                   \
+  unsigned int sse;                                                 \
+  unsigned int whichdir;                                            \
+  int thismse;                                                      \
+  MV *bestmv = &x->best_mv.as_mv;                                   \
+  const unsigned int halfiters = iters_per_step;                    \
+  const unsigned int quarteriters = iters_per_step;                 \
+  const unsigned int eighthiters = iters_per_step;                  \
+  const int y_stride = xd->plane[0].pre[0].stride;                  \
+  const int offset = bestmv->row * y_stride + bestmv->col;          \
+  const uint8_t *const y = xd->plane[0].pre[0].buf;                 \
+                                                                    \
+  int br = bestmv->row * 8;                                         \
+  int bc = bestmv->col * 8;                                         \
+  int hstep = 4;                                                    \
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+  int tr = br;                                                      \
+  int tc = bc;                                                      \
+                                                                    \
+  bestmv->row *= 8;                                                 \
+  bestmv->col *= 8;
+
+static unsigned int setup_center_error(
+    const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const uint8_t *const src, const int src_stride, const uint8_t *const y,
+    int y_stride, const uint8_t *second_pred, int w, int h, int offset,
+    int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
+  unsigned int besterr;
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (second_pred != NULL) {
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      DECLARE_ALIGNED(16, uint16_t, comp_pred16[MAX_SB_SQUARE]);
+      vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
+                               y_stride);
+      besterr =
+          vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
+    } else {
+      DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
+      vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+      besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
+    }
+  } else {
+    besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
+  }
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+#else
+  (void)xd;
+  if (second_pred != NULL) {
+    DECLARE_ALIGNED(16, uint8_t, comp_pred[MAX_SB_SQUARE]);
+    vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
+    besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);
+  } else {
+    besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
+  }
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  return besterr;
+}
+
+static INLINE int divide_and_round(const int n, const int d) {
+  return ((n < 0) ^ (d < 0)) ? ((n - d / 2) / d) : ((n + d / 2) / d);
+}
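+// divide_and_round() rounds half away from zero, e.g. divide_and_round(7, 2)
+// == 4 and divide_and_round(-7, 2) == -4, so the surface-minimum estimate
+// below behaves symmetrically for mirrored cost lists.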
+
+static INLINE int is_cost_list_wellbehaved(int *cost_list) {
+  return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] &&
+         cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4];
+}
+
+// Returns a surface minimum estimate at the given precision of 1/2^n bits.
+// Assume a model for the cost surface: S = A(x - x0)^2 + B(y - y0)^2 + C.
+// For a given set of costs S0, S1, S2, S3, S4 at points
+// (y, x) = (0, 0), (0, -1), (1, 0), (0, 1) and (-1, 0) respectively,
+// the solution for the location of the minimum (x0, y0) is given by:
+// x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0),
+// y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0).
+// The code below is an integerized version of that.
+static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
+  *ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)),
+                         (cost_list[1] - 2 * cost_list[0] + cost_list[3]));
+  *ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)),
+                         (cost_list[4] - 2 * cost_list[0] + cost_list[2]));
+}
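+// Worked example (illustrative): for a 1-D quadratic S(x) = 16 * (x - 1/4)^2
+// + C, the sampled costs are S1 = S(-1) = 25 + C, S0 = S(0) = 1 + C and
+// S3 = S(1) = 9 + C. With bits == 2 (quarter-step units) the constant C
+// cancels and
+//   ic = divide_and_round((25 - 9) << 1, 25 - 2 * 1 + 9) = 32 / 32 = 1,
+// recovering x0 = 1/4 exactly.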
+
+int vp10_find_best_sub_pixel_tree_pruned_evenmore(
+    MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, const uint8_t *second_pred, int w, int h,
+    int use_upsampled_ref) {
+  SETUP_SUBPEL_SEARCH;
+  besterr = setup_center_error(
+      xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+      y_stride, second_pred, w, h, offset, mvjcost, mvcost, sse1, distortion);
+  (void)halfiters;
+  (void)quarteriters;
+  (void)eighthiters;
+  (void)whichdir;
+  (void)allow_hp;
+  (void)forced_stop;
+  (void)hstep;
+  (void)use_upsampled_ref;
+
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+      cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
+      cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
+    int ir, ic;
+    unsigned int minpt;
+    get_cost_surf_min(cost_list, &ir, &ic, 2);
+    if (ir != 0 || ic != 0) {
+      CHECK_BETTER(minpt, tr + 2 * ir, tc + 2 * ic);
+    }
+  } else {
+    FIRST_LEVEL_CHECKS;
+    if (halfiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+
+    tr = br;
+    tc = bc;
+
+    // Each subsequent iteration checks at least one point in common with
+    // the last iteration (two, if the diagonal was selected), at 1/4 pel.
+    // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+    if (forced_stop != 2) {
+      hstep >>= 1;
+      FIRST_LEVEL_CHECKS;
+      if (quarteriters > 1) {
+        SECOND_LEVEL_CHECKS;
+      }
+    }
+  }
+
+  tr = br;
+  tc = bc;
+
+  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (eighthiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+  }
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
+int vp10_find_best_sub_pixel_tree_pruned_more(
+    MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, const uint8_t *second_pred, int w, int h,
+    int use_upsampled_ref) {
+  SETUP_SUBPEL_SEARCH;
+  (void)use_upsampled_ref;
+
+  besterr = setup_center_error(
+      xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+      y_stride, second_pred, w, h, offset, mvjcost, mvcost, sse1, distortion);
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+      cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
+      cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
+    unsigned int minpt;
+    int ir, ic;
+    get_cost_surf_min(cost_list, &ir, &ic, 1);
+    if (ir != 0 || ic != 0) {
+      CHECK_BETTER(minpt, tr + ir * hstep, tc + ic * hstep);
+    }
+  } else {
+    FIRST_LEVEL_CHECKS;
+    if (halfiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+  }
+
+  // Each subsequent iteration checks at least one point in common with
+  // the last iteration (two, if the diagonal was selected), at 1/4 pel.
+
+  // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+  if (forced_stop != 2) {
+    tr = br;
+    tc = bc;
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (quarteriters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+  }
+
+  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+    tr = br;
+    tc = bc;
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (eighthiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+  }
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
+int vp10_find_best_sub_pixel_tree_pruned(
+    MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    int *cost_list, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, const uint8_t *second_pred, int w, int h,
+    int use_upsampled_ref) {
+  SETUP_SUBPEL_SEARCH;
+  (void)use_upsampled_ref;
+
+  besterr = setup_center_error(
+      xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+      y_stride, second_pred, w, h, offset, mvjcost, mvcost, sse1, distortion);
+  if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+      cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
+      cost_list[4] != INT_MAX) {
+    unsigned int left, right, up, down, diag;
+    whichdir = (cost_list[1] < cost_list[3] ? 0 : 1) +
+               (cost_list[2] < cost_list[4] ? 0 : 2);
+    switch (whichdir) {
+      case 0:
+        CHECK_BETTER(left, tr, tc - hstep);
+        CHECK_BETTER(down, tr + hstep, tc);
+        CHECK_BETTER(diag, tr + hstep, tc - hstep);
+        break;
+      case 1:
+        CHECK_BETTER(right, tr, tc + hstep);
+        CHECK_BETTER(down, tr + hstep, tc);
+        CHECK_BETTER(diag, tr + hstep, tc + hstep);
+        break;
+      case 2:
+        CHECK_BETTER(left, tr, tc - hstep);
+        CHECK_BETTER(up, tr - hstep, tc);
+        CHECK_BETTER(diag, tr - hstep, tc - hstep);
+        break;
+      case 3:
+        CHECK_BETTER(right, tr, tc + hstep);
+        CHECK_BETTER(up, tr - hstep, tc);
+        CHECK_BETTER(diag, tr - hstep, tc + hstep);
+        break;
+    }
+  } else {
+    FIRST_LEVEL_CHECKS;
+    if (halfiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+  }
+
+  tr = br;
+  tc = bc;
+
+  // Each subsequent iteration checks at least one point in common with
+  // the last iteration (two, if the diagonal was selected), at 1/4 pel.
+
+  // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+  if (forced_stop != 2) {
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (quarteriters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+    tr = br;
+    tc = bc;
+  }
+
+  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (eighthiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+    tr = br;
+    tc = bc;
+  }
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
+/* clang-format off */
+static const MV search_step_table[12] = {
+  // left, right, up, down
+  { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 },
+  { 0, -2 }, { 0, 2 }, { -2, 0 }, { 2, 0 },
+  { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
+};
+/* clang-format on */
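+// One row of four entries is consumed per iteration of the loop in
+// vp10_find_best_sub_pixel_tree() below; the +/-4, +/-2 and +/-1 offsets are
+// in 1/8-pel units and correspond to the half-, quarter- and eighth-pel
+// rounds respectively.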
+
+static int upsampled_pref_error(const MACROBLOCKD *xd,
+                                const vpx_variance_fn_ptr_t *vfp,
+                                const uint8_t *const src, const int src_stride,
+                                const uint8_t *const y, int y_stride,
+                                const uint8_t *second_pred, int w, int h,
+                                unsigned int *sse) {
+  unsigned int besterr;
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
+    if (second_pred != NULL)
+      vpx_highbd_comp_avg_upsampled_pred(pred16, second_pred, w, h, y,
+                                         y_stride);
+    else
+      vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+
+    besterr = vfp->vf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, sse);
+  } else {
+    DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+#else
+  DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+  (void)xd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    if (second_pred != NULL)
+      vpx_comp_avg_upsampled_pred(pred, second_pred, w, h, y, y_stride);
+    else
+      vpx_upsampled_pred(pred, w, h, y, y_stride);
+
+    besterr = vfp->vf(pred, w, src, src_stride, sse);
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif
+  return besterr;
+}
+
+static unsigned int upsampled_setup_center_error(
+    const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const uint8_t *const src, const int src_stride, const uint8_t *const y,
+    int y_stride, const uint8_t *second_pred, int w, int h, int offset,
+    int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
+  unsigned int besterr = upsampled_pref_error(
+      xd, vfp, src, src_stride, y + offset, y_stride, second_pred, w, h, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  return besterr;
+}
+
+int vp10_find_best_sub_pixel_tree(MACROBLOCK *x, const MV *ref_mv, int allow_hp,
+                                  int error_per_bit,
+                                  const vpx_variance_fn_ptr_t *vfp,
+                                  int forced_stop, int iters_per_step,
+                                  int *cost_list, int *mvjcost, int *mvcost[2],
+                                  int *distortion, unsigned int *sse1,
+                                  const uint8_t *second_pred, int w, int h,
+                                  int use_upsampled_ref) {
+  const uint8_t *const src_address = x->plane[0].src.buf;
+  const int src_stride = x->plane[0].src.stride;
+  const MACROBLOCKD *xd = &x->e_mbd;
+  unsigned int besterr = INT_MAX;
+  unsigned int sse;
+  unsigned int thismse;
+  const int y_stride = xd->plane[0].pre[0].stride;
+  MV *bestmv = &x->best_mv.as_mv;
+  const int offset = bestmv->row * y_stride + bestmv->col;
+  const uint8_t *const y = xd->plane[0].pre[0].buf;
+
+  int br = bestmv->row * 8;
+  int bc = bestmv->col * 8;
+  int hstep = 4;
+  int iter, round = 3 - forced_stop;
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  int tr = br;
+  int tc = bc;
+  const MV *search_step = search_step_table;
+  int idx, best_idx = -1;
+  unsigned int cost_array[5];
+  int kr, kc;
+
+  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+    if (round == 3) round = 2;
+
+  bestmv->row *= 8;
+  bestmv->col *= 8;
+
+  // use_upsampled_ref can be 0 or 1
+  if (use_upsampled_ref)
+    besterr = upsampled_setup_center_error(
+        xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+        y_stride, second_pred, w, h, (offset * 8), mvjcost, mvcost, sse1,
+        distortion);
+  else
+    besterr = setup_center_error(
+        xd, bestmv, ref_mv, error_per_bit, vfp, src_address, src_stride, y,
+        y_stride, second_pred, w, h, offset, mvjcost, mvcost, sse1, distortion);
+
+  (void)cost_list;  // to silence compiler warning
+
+  for (iter = 0; iter < round; ++iter) {
+    // Check vertical and horizontal sub-pixel positions.
+    for (idx = 0; idx < 4; ++idx) {
+      tr = br + search_step[idx].row;
+      tc = bc + search_step[idx].col;
+      if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+        MV this_mv = { tr, tc };
+
+        if (use_upsampled_ref) {
+          const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+          thismse = upsampled_pref_error(xd, vfp, src_address, src_stride,
+                                         pre_address, y_stride, second_pred, w,
+                                         h, &sse);
+        } else {
+          const uint8_t *const pre_address =
+              y + (tr >> 3) * y_stride + (tc >> 3);
+          if (second_pred == NULL)
+            thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
+                               src_address, src_stride, &sse);
+          else
+            thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
+                                src_address, src_stride, &sse, second_pred);
+        }
+
+        cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+                                                mvcost, error_per_bit);
+
+        if (cost_array[idx] < besterr) {
+          best_idx = idx;
+          besterr = cost_array[idx];
+          *distortion = thismse;
+          *sse1 = sse;
+        }
+      } else {
+        cost_array[idx] = INT_MAX;
+      }
+    }
+
+    // Check diagonal sub-pixel position
+    kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep);
+    kr = (cost_array[2] <= cost_array[3] ? -hstep : hstep);
+
+    tc = bc + kc;
+    tr = br + kr;
+    if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+      MV this_mv = { tr, tc };
+
+      if (use_upsampled_ref) {
+        const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+        thismse =
+            upsampled_pref_error(xd, vfp, src_address, src_stride, pre_address,
+                                 y_stride, second_pred, w, h, &sse);
+      } else {
+        const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
+
+        if (second_pred == NULL)
+          thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                             src_stride, &sse);
+        else
+          thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
+                              src_address, src_stride, &sse, second_pred);
+      }
+
+      cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+                                            error_per_bit);
+
+      if (cost_array[4] < besterr) {
+        best_idx = 4;
+        besterr = cost_array[4];
+        *distortion = thismse;
+        *sse1 = sse;
+      }
+    } else {
+      // The diagonal slot; idx has already run past the end of the loop.
+      cost_array[4] = INT_MAX;
+    }
+
+    if (best_idx < 4 && best_idx >= 0) {
+      br += search_step[best_idx].row;
+      bc += search_step[best_idx].col;
+    } else if (best_idx == 4) {
+      br = tr;
+      bc = tc;
+    }
+
+    if (iters_per_step > 1 && best_idx != -1) {
+      if (use_upsampled_ref) {
+        SECOND_LEVEL_CHECKS_BEST(1);
+      } else {
+        SECOND_LEVEL_CHECKS_BEST(0);
+      }
+    }
+
+    search_step += 4;
+    hstep >>= 1;
+    best_idx = -1;
+  }
+
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
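+// Informative summary of the search above: round = 3 - forced_stop, so
+// forced_stop == 0 runs the half-, quarter- and eighth-pel rounds (the
+// eighth-pel round only when allow_hp permits high-precision MVs),
+// forced_stop == 1 stops after quarter pel, and forced_stop == 2 stops after
+// half pel.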
+
+#undef PRE
+#undef CHECK_BETTER
+
+static INLINE int check_bounds(const MACROBLOCK *x, int row, int col,
+                               int range) {
+  return ((row - range) >= x->mv_row_min) & ((row + range) <= x->mv_row_max) &
+         ((col - range) >= x->mv_col_min) & ((col + range) <= x->mv_col_max);
+}
+
+static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) {
+  return (mv->col >= x->mv_col_min) && (mv->col <= x->mv_col_max) &&
+         (mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max);
+}
+
+#define CHECK_BETTER                                                      \
+  {                                                                       \
+    if (thissad < bestsad) {                                              \
+      if (use_mvcost)                                                     \
+        thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
+      if (thissad < bestsad) {                                            \
+        bestsad = thissad;                                                \
+        best_site = i;                                                    \
+      }                                                                   \
+    }                                                                     \
+  }
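+// The double comparison above is deliberate: the raw SAD is tested first so
+// that mvsad_err_cost() is only evaluated for candidates that are already
+// competitive, and the candidate is accepted only if it still wins once the
+// motion-vector rate cost has been added.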
+
+#define MAX_PATTERN_SCALES 11
+#define MAX_PATTERN_CANDIDATES 8  // max number of candidates per scale
+#define PATTERN_CANDIDATES_REF 3  // number of refinement candidates
+
+// Calculate and return a sad+mvcost list around an integer best pel.
+static INLINE void calc_int_cost_list(const MACROBLOCK *x,
+                                      const MV *const ref_mv, int sadpb,
+                                      const vpx_variance_fn_ptr_t *fn_ptr,
+                                      const MV *best_mv, int *cost_list) {
+  static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
+  const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
+  const int br = best_mv->row;
+  const int bc = best_mv->col;
+  MV this_mv;
+  int i;
+  unsigned int sse;
+
+  this_mv.row = br;
+  this_mv.col = bc;
+  cost_list[0] =
+      fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv),
+                 in_what->stride, &sse) +
+      mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+  if (check_bounds(x, br, bc, 1)) {
+    for (i = 0; i < 4; i++) {
+      const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+      cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
+                                    get_buf_from_mv(in_what, &this_mv),
+                                    in_what->stride, &sse) +
+                         mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+                                     x->mvcost, x->errorperbit);
+    }
+  } else {
+    for (i = 0; i < 4; i++) {
+      const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+      if (!is_mv_in(x, &this_mv))
+        cost_list[i + 1] = INT_MAX;
+      else
+        cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
+                                      get_buf_from_mv(in_what, &this_mv),
+                                      in_what->stride, &sse) +
+                           mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+                                       x->mvcost, x->errorperbit);
+    }
+  }
+}
+
+static INLINE void calc_int_sad_list(const MACROBLOCK *x,
+                                     const MV *const ref_mv, int sadpb,
+                                     const vpx_variance_fn_ptr_t *fn_ptr,
+                                     const MV *best_mv, int *cost_list,
+                                     const int use_mvcost, const int bestsad) {
+  static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
+  const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
+  int i;
+  const int br = best_mv->row;
+  const int bc = best_mv->col;
+
+  if (cost_list[0] == INT_MAX) {
+    cost_list[0] = bestsad;
+    if (check_bounds(x, br, bc, 1)) {
+      for (i = 0; i < 4; i++) {
+        const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+        cost_list[i + 1] =
+            fn_ptr->sdf(what->buf, what->stride,
+                        get_buf_from_mv(in_what, &this_mv), in_what->stride);
+      }
+    } else {
+      for (i = 0; i < 4; i++) {
+        const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+        if (!is_mv_in(x, &this_mv))
+          cost_list[i + 1] = INT_MAX;
+        else
+          cost_list[i + 1] =
+              fn_ptr->sdf(what->buf, what->stride,
+                          get_buf_from_mv(in_what, &this_mv), in_what->stride);
+      }
+    }
+  } else {
+    if (use_mvcost) {
+      for (i = 0; i < 4; i++) {
+        const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+        if (cost_list[i + 1] != INT_MAX) {
+          cost_list[i + 1] += mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+        }
+      }
+    }
+  }
+}
+
+// Generic pattern search function that searches over multiple scales.
+// Each scale can have a different number of candidates and a different
+// candidate shape, as indicated by the num_candidates and candidates arrays
+// passed into this function.
+//
+static int pattern_search(
+    MACROBLOCK *x, MV *start_mv, int search_param, int sad_per_bit,
+    int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+    int use_mvcost, const MV *center_mv,
+    const int num_candidates[MAX_PATTERN_SCALES],
+    const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
+    10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+  };
+  int i, s, t;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const int last_is_4 = num_candidates[0] == 4;
+  int br, bc;
+  int bestsad = INT_MAX;
+  int thissad;
+  int k = -1;
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  int best_init_s = search_param_to_steps[search_param];
+  // Adjust start_mv to make sure it is within the MV range.
+  clamp_mv(start_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+           x->mv_row_max);
+  br = start_mv->row;
+  bc = start_mv->col;
+  if (cost_list != NULL) {
+    cost_list[0] = cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] =
+        INT_MAX;
+  }
+
+  // Work out the start point for the search
+  bestsad = vfp->sdf(what->buf, what->stride,
+                     get_buf_from_mv(in_what, start_mv), in_what->stride) +
+            mvsad_err_cost(x, start_mv, &fcenter_mv, sad_per_bit);
+
+  // Search all possible scales up to the search param around the center
+  // point, and pick the scale of the best point found as the starting scale
+  // for further steps around it.
+  if (do_init_search) {
+    s = best_init_s;
+    best_init_s = -1;
+    for (t = 0; t <= s; ++t) {
+      int best_site = -1;
+      if (check_bounds(x, br, bc, 1 << t)) {
+        for (i = 0; i < num_candidates[t]; i++) {
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
+          CHECK_BETTER
+        }
+      } else {
+        for (i = 0; i < num_candidates[t]; i++) {
+          const MV this_mv = { br + candidates[t][i].row,
+                               bc + candidates[t][i].col };
+          if (!is_mv_in(x, &this_mv)) continue;
+          thissad =
+              vfp->sdf(what->buf, what->stride,
+                       get_buf_from_mv(in_what, &this_mv), in_what->stride);
+          CHECK_BETTER
+        }
+      }
+      if (best_site == -1) {
+        continue;
+      } else {
+        best_init_s = t;
+        k = best_site;
+      }
+    }
+    if (best_init_s != -1) {
+      br += candidates[best_init_s][k].row;
+      bc += candidates[best_init_s][k].col;
+    }
+  }
+
+  // If the center point is still the best, just skip this and move to
+  // the refinement step.
+  if (best_init_s != -1) {
+    const int last_s = (last_is_4 && cost_list != NULL);
+    int best_site = -1;
+    s = best_init_s;
+
+    for (; s >= last_s; s--) {
+      // No need to search all points the 1st time if initial search was used
+      if (!do_init_search || s != best_init_s) {
+        if (check_bounds(x, br, bc, 1 << s)) {
+          for (i = 0; i < num_candidates[s]; i++) {
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        } else {
+          for (i = 0; i < num_candidates[s]; i++) {
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        }
+
+        if (best_site == -1) {
+          continue;
+        } else {
+          br += candidates[s][best_site].row;
+          bc += candidates[s][best_site].col;
+          k = best_site;
+        }
+      }
+
+      do {
+        int next_chkpts_indices[PATTERN_CANDIDATES_REF];
+        best_site = -1;
+        next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1;
+        next_chkpts_indices[1] = k;
+        next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1;
+
+        if (check_bounds(x, br, bc, 1 << s)) {
+          for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        } else {
+          for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            if (!is_mv_in(x, &this_mv)) continue;
+            thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        }
+
+        if (best_site != -1) {
+          k = next_chkpts_indices[best_site];
+          br += candidates[s][k].row;
+          bc += candidates[s][k].col;
+        }
+      } while (best_site != -1);
+    }
+
+    // Note: If we enter the if below, then cost_list must be non-NULL.
+    if (s == 0) {
+      cost_list[0] = bestsad;
+      if (!do_init_search || s != best_init_s) {
+        if (check_bounds(x, br, bc, 1 << s)) {
+          for (i = 0; i < num_candidates[s]; i++) {
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            cost_list[i + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        } else {
+          for (i = 0; i < num_candidates[s]; i++) {
+            const MV this_mv = { br + candidates[s][i].row,
+                                 bc + candidates[s][i].col };
+            if (!is_mv_in(x, &this_mv)) continue;
+            cost_list[i + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        }
+
+        if (best_site != -1) {
+          br += candidates[s][best_site].row;
+          bc += candidates[s][best_site].col;
+          k = best_site;
+        }
+      }
+      while (best_site != -1) {
+        int next_chkpts_indices[PATTERN_CANDIDATES_REF];
+        best_site = -1;
+        next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1;
+        next_chkpts_indices[1] = k;
+        next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1;
+        cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX;
+        cost_list[((k + 2) % 4) + 1] = cost_list[0];
+        cost_list[0] = bestsad;
+
+        if (check_bounds(x, br, bc, 1 << s)) {
+          for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            cost_list[next_chkpts_indices[i] + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        } else {
+          for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
+            const MV this_mv = {
+              br + candidates[s][next_chkpts_indices[i]].row,
+              bc + candidates[s][next_chkpts_indices[i]].col
+            };
+            if (!is_mv_in(x, &this_mv)) {
+              cost_list[next_chkpts_indices[i] + 1] = INT_MAX;
+              continue;
+            }
+            cost_list[next_chkpts_indices[i] + 1] = thissad =
+                vfp->sdf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, &this_mv), in_what->stride);
+            CHECK_BETTER
+          }
+        }
+
+        if (best_site != -1) {
+          k = next_chkpts_indices[best_site];
+          br += candidates[s][k].row;
+          bc += candidates[s][k].col;
+        }
+      }
+    }
+  }
+
+  // Returns the one-away integer pel cost/sad around the best as follows:
+  // cost_list[0]: cost/sad at the best integer pel
+  // cost_list[1]: cost/sad at delta {0, -1} (left)   from the best integer pel
+  // cost_list[2]: cost/sad at delta { 1, 0} (bottom) from the best integer pel
+  // cost_list[3]: cost/sad at delta { 0, 1} (right)  from the best integer pel
+  // cost_list[4]: cost/sad at delta {-1, 0} (top)    from the best integer pel
+  if (cost_list) {
+    const MV best_mv = { br, bc };
+    if (last_is_4) {
+      calc_int_sad_list(x, center_mv, sad_per_bit, vfp, &best_mv, cost_list,
+                        use_mvcost, bestsad);
+    } else {
+      calc_int_cost_list(x, center_mv, sad_per_bit, vfp, &best_mv, cost_list);
+    }
+  }
+  x->best_mv.as_mv.row = br;
+  x->best_mv.as_mv.col = bc;
+  return bestsad;
+}
+
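+// Motion-compensated prediction variance for best_mv, plus the mv rate cost
+// when use_mvcost is set.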
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+                        int use_mvcost) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+  unsigned int unused;
+
+  return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+                 in_what->stride, &unused) +
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
+}
+
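+// As vp10_get_mvpred_var(), but the prediction is averaged with second_pred
+// (compound prediction).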
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                           const MV *center_mv, const uint8_t *second_pred,
+                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+  unsigned int unused;
+
+  return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
+                   what->buf, what->stride, &unused, second_pred) +
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
+}
+
+int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                    int sad_per_bit, int do_init_search, int *cost_list,
+                    const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                    const MV *center_mv) {
+  // First scale has the 8 closest points; the rest have 6 points in a hex
+  // shape at increasing scales
+  static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
+                                                              6, 6, 6, 6, 6 };
+  // Note that the largest candidate step at each scale is 2^scale
+  /* clang-format off */
+  static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+    { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 },
+      { -1, 0 } },
+    { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } },
+    { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } },
+    { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } },
+    { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } },
+    { { -16, -32 }, { 16, -32 }, { 32, 0 }, { 16, 32 }, { -16, 32 },
+      { -32, 0 } },
+    { { -32, -64 }, { 32, -64 }, { 64, 0 }, { 32, 64 }, { -32, 64 },
+      { -64, 0 } },
+    { { -64, -128 }, { 64, -128 }, { 128, 0 }, { 64, 128 }, { -64, 128 },
+      { -128, 0 } },
+    { { -128, -256 }, { 128, -256 }, { 256, 0 }, { 128, 256 }, { -128, 256 },
+      { -256, 0 } },
+    { { -256, -512 }, { 256, -512 }, { 512, 0 }, { 256, 512 }, { -256, 512 },
+      { -512, 0 } },
+    { { -512, -1024 }, { 512, -1024 }, { 1024, 0 }, { 512, 1024 },
+      { -512, 1024 }, { -1024, 0 } },
+  };
+  /* clang-format on */
+  return pattern_search(x, start_mv, search_param, sad_per_bit, do_init_search,
+                        cost_list, vfp, use_mvcost, center_mv,
+                        hex_num_candidates, hex_candidates);
+}
+
+static int bigdia_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                         int sad_per_bit, int do_init_search, int *cost_list,
+                         const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                         const MV *center_mv) {
+  // First scale has the 4 closest points; the rest have 8 points in a
+  // diamond shape at increasing scales
+  static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
+    4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  };
+  // Note that the largest candidate step at each scale is 2^scale
+  /* clang-format off */
+  static const MV
+      bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+        { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } },
+        { { -1, -1 }, { 0, -2 }, { 1, -1 }, { 2, 0 }, { 1, 1 }, { 0, 2 },
+          { -1, 1 }, { -2, 0 } },
+        { { -2, -2 }, { 0, -4 }, { 2, -2 }, { 4, 0 }, { 2, 2 }, { 0, 4 },
+          { -2, 2 }, { -4, 0 } },
+        { { -4, -4 }, { 0, -8 }, { 4, -4 }, { 8, 0 }, { 4, 4 }, { 0, 8 },
+          { -4, 4 }, { -8, 0 } },
+        { { -8, -8 }, { 0, -16 }, { 8, -8 }, { 16, 0 }, { 8, 8 }, { 0, 16 },
+          { -8, 8 }, { -16, 0 } },
+        { { -16, -16 }, { 0, -32 }, { 16, -16 }, { 32, 0 }, { 16, 16 },
+          { 0, 32 }, { -16, 16 }, { -32, 0 } },
+        { { -32, -32 }, { 0, -64 }, { 32, -32 }, { 64, 0 }, { 32, 32 },
+          { 0, 64 }, { -32, 32 }, { -64, 0 } },
+        { { -64, -64 }, { 0, -128 }, { 64, -64 }, { 128, 0 }, { 64, 64 },
+          { 0, 128 }, { -64, 64 }, { -128, 0 } },
+        { { -128, -128 }, { 0, -256 }, { 128, -128 }, { 256, 0 }, { 128, 128 },
+          { 0, 256 }, { -128, 128 }, { -256, 0 } },
+        { { -256, -256 }, { 0, -512 }, { 256, -256 }, { 512, 0 }, { 256, 256 },
+          { 0, 512 }, { -256, 256 }, { -512, 0 } },
+        { { -512, -512 }, { 0, -1024 }, { 512, -512 }, { 1024, 0 },
+          { 512, 512 }, { 0, 1024 }, { -512, 512 }, { -1024, 0 } },
+      };
+  /* clang-format on */
+  return pattern_search(x, start_mv, search_param, sad_per_bit, do_init_search,
+                        cost_list, vfp, use_mvcost, center_mv,
+                        bigdia_num_candidates, bigdia_candidates);
+}
+
+static int square_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                         int sad_per_bit, int do_init_search, int *cost_list,
+                         const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                         const MV *center_mv) {
+  // All scales have the 8 closest points, arranged in a square shape
+  static const int square_num_candidates[MAX_PATTERN_SCALES] = {
+    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+  };
+  // Note that the largest candidate step at each scale is 2^scale
+  /* clang-format off */
+  static const MV
+      square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+        { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
+          { -1, 1 }, { -1, 0 } },
+        { { -2, -2 }, { 0, -2 }, { 2, -2 }, { 2, 0 }, { 2, 2 }, { 0, 2 },
+          { -2, 2 }, { -2, 0 } },
+        { { -4, -4 }, { 0, -4 }, { 4, -4 }, { 4, 0 }, { 4, 4 }, { 0, 4 },
+          { -4, 4 }, { -4, 0 } },
+        { { -8, -8 }, { 0, -8 }, { 8, -8 }, { 8, 0 }, { 8, 8 }, { 0, 8 },
+          { -8, 8 }, { -8, 0 } },
+        { { -16, -16 }, { 0, -16 }, { 16, -16 }, { 16, 0 }, { 16, 16 },
+          { 0, 16 }, { -16, 16 }, { -16, 0 } },
+        { { -32, -32 }, { 0, -32 }, { 32, -32 }, { 32, 0 }, { 32, 32 },
+          { 0, 32 }, { -32, 32 }, { -32, 0 } },
+        { { -64, -64 }, { 0, -64 }, { 64, -64 }, { 64, 0 }, { 64, 64 },
+          { 0, 64 }, { -64, 64 }, { -64, 0 } },
+        { { -128, -128 }, { 0, -128 }, { 128, -128 }, { 128, 0 }, { 128, 128 },
+          { 0, 128 }, { -128, 128 }, { -128, 0 } },
+        { { -256, -256 }, { 0, -256 }, { 256, -256 }, { 256, 0 }, { 256, 256 },
+          { 0, 256 }, { -256, 256 }, { -256, 0 } },
+        { { -512, -512 }, { 0, -512 }, { 512, -512 }, { 512, 0 }, { 512, 512 },
+          { 0, 512 }, { -512, 512 }, { -512, 0 } },
+        { { -1024, -1024 }, { 0, -1024 }, { 1024, -1024 }, { 1024, 0 },
+          { 1024, 1024 }, { 0, 1024 }, { -1024, 1024 }, { -1024, 0 } },
+      };
+  /* clang-format on */
+  return pattern_search(x, start_mv, search_param, sad_per_bit, do_init_search,
+                        cost_list, vfp, use_mvcost, center_mv,
+                        square_num_candidates, square_candidates);
+}
+
+static int fast_hex_search(MACROBLOCK *x, MV *ref_mv, int search_param,
+                           int sad_per_bit,
+                           int do_init_search,  // must be zero for fast_hex
+                           int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+                           int use_mvcost, const MV *center_mv) {
+  return vp10_hex_search(
+      x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
+      do_init_search, cost_list, vfp, use_mvcost, center_mv);
+}
+
+static int fast_dia_search(MACROBLOCK *x, MV *ref_mv, int search_param,
+                           int sad_per_bit, int do_init_search, int *cost_list,
+                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                           const MV *center_mv) {
+  return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param),
+                       sad_per_bit, do_init_search, cost_list, vfp, use_mvcost,
+                       center_mv);
+}
+
+#undef CHECK_BETTER
+
+// Exhaustive motion search around a given centre position with a given
+// step size.
+static int exhuastive_mesh_search(MACROBLOCK *x, MV *ref_mv, MV *best_mv,
+                                  int range, int step, int sad_per_bit,
+                                  const vpx_variance_fn_ptr_t *fn_ptr,
+                                  const MV *center_mv) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  MV fcenter_mv = { center_mv->row, center_mv->col };
+  unsigned int best_sad = INT_MAX;
+  int r, c, i;
+  int start_col, end_col, start_row, end_row;
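+  // When step == 1 every column is visited; advance 4 at a time so that
+  // sdx4df() can evaluate 4 SADs per call.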
+  int col_step = (step > 1) ? step : 4;
+
+  assert(step >= 1);
+
+  clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+           x->mv_row_max);
+  *best_mv = fcenter_mv;
+  best_sad =
+      fn_ptr->sdf(what->buf, what->stride,
+                  get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
+      mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
+  start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
+  start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
+  end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
+  end_col = VPXMIN(range, x->mv_col_max - fcenter_mv.col);
+
+  for (r = start_row; r <= end_row; r += step) {
+    for (c = start_col; c <= end_col; c += col_step) {
+      // Step > 1 means we are not checking every location in this pass.
+      if (step > 1) {
+        const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c };
+        unsigned int sad =
+            fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                        in_what->stride);
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            x->second_best_mv.as_mv = *best_mv;
+            *best_mv = mv;
+          }
+        }
+      } else {
+        // 4 sads in a single call if we are checking every location
+        if (c + 3 <= end_col) {
+          unsigned int sads[4];
+          const uint8_t *addrs[4];
+          for (i = 0; i < 4; ++i) {
+            const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+            addrs[i] = get_buf_from_mv(in_what, &mv);
+          }
+          fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads);
+
+          for (i = 0; i < 4; ++i) {
+            if (sads[i] < best_sad) {
+              const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+              const unsigned int sad =
+                  sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+              if (sad < best_sad) {
+                best_sad = sad;
+                x->second_best_mv.as_mv = *best_mv;
+                *best_mv = mv;
+              }
+            }
+          }
+        } else {
+          for (i = 0; i < end_col - c; ++i) {
+            const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+            unsigned int sad =
+                fn_ptr->sdf(what->buf, what->stride,
+                            get_buf_from_mv(in_what, &mv), in_what->stride);
+            if (sad < best_sad) {
+              sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+              if (sad < best_sad) {
+                best_sad = sad;
+                x->second_best_mv.as_mv = *best_mv;
+                *best_mv = mv;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  return best_sad;
+}
+
+int vp10_diamond_search_sad_c(MACROBLOCK *x, const search_site_config *cfg,
+                              MV *ref_mv, MV *best_mv, int search_param,
+                              int sad_per_bit, int *num00,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv) {
+  int i, j, step;
+
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  uint8_t *what = x->plane[0].src.buf;
+  const int what_stride = x->plane[0].src.stride;
+  const uint8_t *in_what;
+  const int in_what_stride = xd->plane[0].pre[0].stride;
+  const uint8_t *best_address;
+
+  unsigned int bestsad = INT_MAX;
+  int best_site = 0;
+  int last_site = 0;
+
+  int ref_row;
+  int ref_col;
+
+  // search_param determines the length of the initial step and hence the number
+  // of iterations.
+  // 0 = initial step (MAX_FIRST_STEP) pel
+  // 1 = (MAX_FIRST_STEP/2) pel,
+  // 2 = (MAX_FIRST_STEP/4) pel...
+  const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
+  const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param;
+
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+  ref_row = ref_mv->row;
+  ref_col = ref_mv->col;
+  *num00 = 0;
+  best_mv->row = ref_row;
+  best_mv->col = ref_col;
+
+  // Work out the start point for the search
+  in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col;
+  best_address = in_what;
+
+  // Check the starting position
+  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+            mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+
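+  // Candidate sites for each step start at offset 1 in the search-site array.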
+  i = 1;
+
+  for (step = 0; step < tot_steps; step++) {
+    int all_in = 1, t;
+
+    // all_in is true if every one of the points we are checking is within
+    // the bounds of the image.
+    all_in &= ((best_mv->row + ss[i].mv.row) > x->mv_row_min);
+    all_in &= ((best_mv->row + ss[i + 1].mv.row) < x->mv_row_max);
+    all_in &= ((best_mv->col + ss[i + 2].mv.col) > x->mv_col_min);
+    all_in &= ((best_mv->col + ss[i + 3].mv.col) < x->mv_col_max);
+
+    // If all the pixels are within the bounds we don't check whether the
+    // search point is valid in this loop, otherwise we check each point
+    // for validity.
+    if (all_in) {
+      unsigned int sad_array[4];
+
+      for (j = 0; j < cfg->searches_per_step; j += 4) {
+        unsigned char const *block_offset[4];
+
+        for (t = 0; t < 4; t++)
+          block_offset[t] = ss[i + t].offset + best_address;
+
+        fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+                       sad_array);
+
+        for (t = 0; t < 4; t++, i++) {
+          if (sad_array[t] < bestsad) {
+            const MV this_mv = { best_mv->row + ss[i].mv.row,
+                                 best_mv->col + ss[i].mv.col };
+            sad_array[t] +=
+                mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
+            if (sad_array[t] < bestsad) {
+              bestsad = sad_array[t];
+              best_site = i;
+            }
+          }
+        }
+      }
+    } else {
+      for (j = 0; j < cfg->searches_per_step; j++) {
+        // Trap illegal vectors
+        const MV this_mv = { best_mv->row + ss[i].mv.row,
+                             best_mv->col + ss[i].mv.col };
+
+        if (is_mv_in(x, &this_mv)) {
+          const uint8_t *const check_here = ss[i].offset + best_address;
+          unsigned int thissad =
+              fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+
+          if (thissad < bestsad) {
+            thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
+            if (thissad < bestsad) {
+              bestsad = thissad;
+              best_site = i;
+            }
+          }
+        }
+        i++;
+      }
+    }
+    if (best_site != last_site) {
+      x->second_best_mv.as_mv = *best_mv;
+      best_mv->row += ss[best_site].mv.row;
+      best_mv->col += ss[best_site].mv.col;
+      best_address += ss[best_site].offset;
+      last_site = best_site;
+#if defined(NEW_DIAMOND_SEARCH)
+      while (1) {
+        const MV this_mv = { best_mv->row + ss[best_site].mv.row,
+                             best_mv->col + ss[best_site].mv.col };
+        if (is_mv_in(x, &this_mv)) {
+          const uint8_t *const check_here = ss[best_site].offset + best_address;
+          unsigned int thissad =
+              fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
+          if (thissad < bestsad) {
+            thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
+            if (thissad < bestsad) {
+              bestsad = thissad;
+              best_mv->row += ss[best_site].mv.row;
+              best_mv->col += ss[best_site].mv.col;
+              best_address += ss[best_site].offset;
+              continue;
+            }
+          }
+        }
+        break;
+      }
+#endif
+    } else if (best_address == in_what) {
+      (*num00)++;
+    }
+  }
+  return bestsad;
+}
+
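+// 1-D match of the src projection against the ref projection: first scan at
+// a stride of 16, then refine around the best offset with steps of 8, 4, 2
+// and 1. Returns the displacement relative to the centre (bw >> 1).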
+static int vector_match(int16_t *ref, int16_t *src, int bwl) {
+  int best_sad = INT_MAX;
+  int this_sad;
+  int d;
+  int center, offset = 0;
+  int bw = 4 << bwl;  // redundant variable, to be changed in the experiments.
+  for (d = 0; d <= bw; d += 16) {
+    this_sad = vpx_vector_var(&ref[d], src, bwl);
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      offset = d;
+    }
+  }
+  center = offset;
+
+  for (d = -8; d <= 8; d += 16) {
+    int this_pos = offset + d;
+    // check limit
+    if (this_pos < 0 || this_pos > bw) continue;
+    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      center = this_pos;
+    }
+  }
+  offset = center;
+
+  for (d = -4; d <= 4; d += 8) {
+    int this_pos = offset + d;
+    // check limit
+    if (this_pos < 0 || this_pos > bw) continue;
+    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      center = this_pos;
+    }
+  }
+  offset = center;
+
+  for (d = -2; d <= 2; d += 4) {
+    int this_pos = offset + d;
+    // check limit
+    if (this_pos < 0 || this_pos > bw) continue;
+    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      center = this_pos;
+    }
+  }
+  offset = center;
+
+  for (d = -1; d <= 1; d += 2) {
+    int this_pos = offset + d;
+    // check limit
+    if (this_pos < 0 || this_pos > bw) continue;
+    this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      center = this_pos;
+    }
+  }
+
+  return (center - (bw >> 1));
+}
+
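+// One-pel neighbours (top, left, right, bottom) checked around the 1-D match.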
+static const MV search_pos[4] = {
+  { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
+};
+
+unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
+                                            BLOCK_SIZE bsize, int mi_row,
+                                            int mi_col) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
+  DECLARE_ALIGNED(16, int16_t, hbuf[2 * MAX_SB_SIZE]);
+  DECLARE_ALIGNED(16, int16_t, vbuf[2 * MAX_SB_SIZE]);
+  DECLARE_ALIGNED(16, int16_t, src_hbuf[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(16, int16_t, src_vbuf[MAX_SB_SQUARE]);
+  int idx;
+  const int bw = 4 << b_width_log2_lookup[bsize];
+  const int bh = 4 << b_height_log2_lookup[bsize];
+  const int search_width = bw << 1;
+  const int search_height = bh << 1;
+  const int src_stride = x->plane[0].src.stride;
+  const int ref_stride = xd->plane[0].pre[0].stride;
+  uint8_t const *ref_buf, *src_buf;
+  MV *tmp_mv = &xd->mi[0]->mbmi.mv[0].as_mv;
+  unsigned int best_sad, tmp_sad, this_sad[4];
+  MV this_mv;
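+  // Shift applied to the column projections; wider blocks use a larger shift
+  // to keep values in a comparable range.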
+  const int norm_factor = 3 + (bw >> 5);
+  const YV12_BUFFER_CONFIG *scaled_ref_frame =
+      vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+
+  if (scaled_ref_frame) {
+    int i;
+    // Swap out the reference frame for a version that's been scaled to
+    // match the resolution of the current frame, allowing the existing
+    // motion search code to be used without additional modifications.
+    for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
+    vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+  }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  {
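+    // Integral-projection search is not implemented for high bit depth;
+    // return the zero-mv SAD instead.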
+    unsigned int this_sad;
+    tmp_mv->row = 0;
+    tmp_mv->col = 0;
+    this_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, src_stride,
+                                      xd->plane[0].pre[0].buf, ref_stride);
+
+    if (scaled_ref_frame) {
+      int i;
+      for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
+    }
+    return this_sad;
+  }
+#endif
+
+  // Set up prediction 1-D reference set
+  ref_buf = xd->plane[0].pre[0].buf - (bw >> 1);
+  for (idx = 0; idx < search_width; idx += 16) {
+    vpx_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh);
+    ref_buf += 16;
+  }
+
+  ref_buf = xd->plane[0].pre[0].buf - (bh >> 1) * ref_stride;
+  for (idx = 0; idx < search_height; ++idx) {
+    vbuf[idx] = vpx_int_pro_col(ref_buf, bw) >> norm_factor;
+    ref_buf += ref_stride;
+  }
+
+  // Set up src 1-D reference set
+  for (idx = 0; idx < bw; idx += 16) {
+    src_buf = x->plane[0].src.buf + idx;
+    vpx_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh);
+  }
+
+  src_buf = x->plane[0].src.buf;
+  for (idx = 0; idx < bh; ++idx) {
+    src_vbuf[idx] = vpx_int_pro_col(src_buf, bw) >> norm_factor;
+    src_buf += src_stride;
+  }
+
+  // Find the best match per 1-D search
+  tmp_mv->col = vector_match(hbuf, src_hbuf, b_width_log2_lookup[bsize]);
+  tmp_mv->row = vector_match(vbuf, src_vbuf, b_height_log2_lookup[bsize]);
+
+  this_mv = *tmp_mv;
+  src_buf = x->plane[0].src.buf;
+  ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
+  best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
+
+  {
+    const uint8_t *const pos[4] = {
+      ref_buf - ref_stride, ref_buf - 1, ref_buf + 1, ref_buf + ref_stride,
+    };
+
+    cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad);
+  }
+
+  for (idx = 0; idx < 4; ++idx) {
+    if (this_sad[idx] < best_sad) {
+      best_sad = this_sad[idx];
+      tmp_mv->row = search_pos[idx].row + this_mv.row;
+      tmp_mv->col = search_pos[idx].col + this_mv.col;
+    }
+  }
+
+  if (this_sad[0] < this_sad[3])
+    this_mv.row -= 1;
+  else
+    this_mv.row += 1;
+
+  if (this_sad[1] < this_sad[2])
+    this_mv.col -= 1;
+  else
+    this_mv.col += 1;
+
+  ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
+
+  tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
+  if (best_sad > tmp_sad) {
+    *tmp_mv = this_mv;
+    best_sad = tmp_sad;
+  }
+
+  tmp_mv->row *= 8;
+  tmp_mv->col *= 8;
+
+  if (scaled_ref_frame) {
+    int i;
+    for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
+  }
+
+  return best_sad;
+}
+
+/* do_refine: If last step (1-away) of n-step search doesn't pick the center
+              point as the best match, we will do a final 1-away diamond
+              refining search  */
+static int full_pixel_diamond(VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+                              int step_param, int sadpb, int further_steps,
+                              int do_refine, int *cost_list,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *ref_mv) {
+  MV temp_mv;
+  int thissme, n, num00 = 0;
+  int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
+                                        step_param, sadpb, &n, fn_ptr, ref_mv);
+  if (bestsme < INT_MAX)
+    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+  x->best_mv.as_mv = temp_mv;
+
+  // If there won't be more n-step search, check to see if refining search is
+  // needed.
+  if (n > further_steps) do_refine = 0;
+
+  while (n < further_steps) {
+    ++n;
+
+    if (num00) {
+      num00--;
+    } else {
+      thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
+                                        step_param + n, sadpb, &num00, fn_ptr,
+                                        ref_mv);
+      if (thissme < INT_MAX)
+        thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+
+      // check to see if refining search is needed.
+      if (num00 > further_steps - n) do_refine = 0;
+
+      if (thissme < bestsme) {
+        bestsme = thissme;
+        x->best_mv.as_mv = temp_mv;
+      }
+    }
+  }
+
+  // final 1-away diamond refining search
+  if (do_refine) {
+    const int search_range = 8;
+    MV best_mv = x->best_mv.as_mv;
+    thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+                                       ref_mv);
+    if (thissme < INT_MAX)
+      thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
+    if (thissme < bestsme) {
+      bestsme = thissme;
+      x->best_mv.as_mv = best_mv;
+    }
+  }
+
+  // Return cost list.
+  if (cost_list) {
+    calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, &x->best_mv.as_mv, cost_list);
+  }
+  return bestsme;
+}
+
+#define MIN_RANGE 7
+#define MAX_RANGE 256
+#define MIN_INTERVAL 1
+// Runs a limited-range exhaustive mesh search using a pattern set
+// according to the encode speed profile.
+static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
+                                 const MV *centre_mv_full, int sadpb,
+                                 int *cost_list,
+                                 const vpx_variance_fn_ptr_t *fn_ptr,
+                                 const MV *ref_mv, MV *dst_mv) {
+  const SPEED_FEATURES *const sf = &cpi->sf;
+  MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
+  MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
+  int bestsme;
+  int i;
+  int interval = sf->mesh_patterns[0].interval;
+  int range = sf->mesh_patterns[0].range;
+  int baseline_interval_divisor;
+
+  // Keep track of number of exhaustive calls (this frame in this thread).
+  ++(*x->ex_search_count_ptr);
+
+  // Trap illegal values for interval and range for this function.
+  if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
+      (interval > range))
+    return INT_MAX;
+
+  baseline_interval_divisor = range / interval;
+
+  // Check size of proposed first range against magnitude of the centre
+  // value used as a starting point.
+  range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4);
+  range = VPXMIN(range, MAX_RANGE);
+  interval = VPXMAX(interval, range / baseline_interval_divisor);
+
+  // initial search
+  bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
+                                   sadpb, fn_ptr, &temp_mv);
+
+  if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
+    // Progressive searches with range and step size decreasing each time
+    // till we reach a step size of 1. Then break out.
+    for (i = 1; i < MAX_MESH_STEP; ++i) {
+      // First pass with coarser step and longer range
+      bestsme = exhuastive_mesh_search(
+          x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range,
+          sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv);
+
+      if (sf->mesh_patterns[i].interval == 1) break;
+    }
+  }
+
+  if (bestsme < INT_MAX)
+    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+  *dst_mv = temp_mv;
+
+  // Return cost list.
+  if (cost_list) {
+    calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, dst_mv, cost_list);
+  }
+  return bestsme;
+}
+
+int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
+  int r, c;
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
+      mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+  *best_mv = *ref_mv;
+
+  for (r = row_min; r < row_max; ++r) {
+    for (c = col_min; c < col_max; ++c) {
+      const MV mv = { r, c };
+      const int sad =
+          fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                      in_what->stride) +
+          mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+      if (sad < best_sad) {
+        best_sad = sad;
+        *best_mv = mv;
+      }
+    }
+  }
+  return best_sad;
+}
+
+int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
+  int r;
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
+      mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+  *best_mv = *ref_mv;
+
+  for (r = row_min; r < row_max; ++r) {
+    int c = col_min;
+    const uint8_t *check_here = &in_what->buf[r * in_what->stride + c];
+
+    if (fn_ptr->sdx3f != NULL) {
+      while ((c + 2) < col_max) {
+        int i;
+        DECLARE_ALIGNED(16, uint32_t, sads[3]);
+
+        fn_ptr->sdx3f(what->buf, what->stride, check_here, in_what->stride,
+                      sads);
+
+        for (i = 0; i < 3; ++i) {
+          unsigned int sad = sads[i];
+          if (sad < best_sad) {
+            const MV mv = { r, c };
+            sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              *best_mv = mv;
+            }
+          }
+          ++check_here;
+          ++c;
+        }
+      }
+    }
+
+    while (c < col_max) {
+      unsigned int sad =
+          fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
+      if (sad < best_sad) {
+        const MV mv = { r, c };
+        sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+        if (sad < best_sad) {
+          best_sad = sad;
+          *best_mv = mv;
+        }
+      }
+      ++check_here;
+      ++c;
+    }
+  }
+
+  return best_sad;
+}
+
+int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
+                           int sad_per_bit, int distance,
+                           const vpx_variance_fn_ptr_t *fn_ptr,
+                           const MV *center_mv, MV *best_mv) {
+  int r;
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min);
+  const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
+  const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
+  const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                  in_what->stride) +
+      mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+  *best_mv = *ref_mv;
+
+  for (r = row_min; r < row_max; ++r) {
+    int c = col_min;
+    const uint8_t *check_here = &in_what->buf[r * in_what->stride + c];
+
+    if (fn_ptr->sdx8f != NULL) {
+      while ((c + 7) < col_max) {
+        int i;
+        DECLARE_ALIGNED(16, uint32_t, sads[8]);
+
+        fn_ptr->sdx8f(what->buf, what->stride, check_here, in_what->stride,
+                      sads);
+
+        for (i = 0; i < 8; ++i) {
+          unsigned int sad = sads[i];
+          if (sad < best_sad) {
+            const MV mv = { r, c };
+            sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              *best_mv = mv;
+            }
+          }
+          ++check_here;
+          ++c;
+        }
+      }
+    }
+
+    if (fn_ptr->sdx3f != NULL) {
+      while ((c + 2) < col_max) {
+        int i;
+        DECLARE_ALIGNED(16, uint32_t, sads[3]);
+
+        fn_ptr->sdx3f(what->buf, what->stride, check_here, in_what->stride,
+                      sads);
+
+        for (i = 0; i < 3; ++i) {
+          unsigned int sad = sads[i];
+          if (sad < best_sad) {
+            const MV mv = { r, c };
+            sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              *best_mv = mv;
+            }
+          }
+          ++check_here;
+          ++c;
+        }
+      }
+    }
+
+    while (c < col_max) {
+      unsigned int sad =
+          fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
+      if (sad < best_sad) {
+        const MV mv = { r, c };
+        sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+        if (sad < best_sad) {
+          best_sad = sad;
+          *best_mv = mv;
+        }
+      }
+      ++check_here;
+      ++c;
+    }
+  }
+
+  return best_sad;
+}
+
+int vp10_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+                             int search_range,
+                             const vpx_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv);
+  unsigned int best_sad =
+      fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) +
+      mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
+  int i, j;
+
+  for (i = 0; i < search_range; i++) {
+    int best_site = -1;
+    const int all_in = ((ref_mv->row - 1) > x->mv_row_min) &
+                       ((ref_mv->row + 1) < x->mv_row_max) &
+                       ((ref_mv->col - 1) > x->mv_col_min) &
+                       ((ref_mv->col + 1) < x->mv_col_max);
+
+    if (all_in) {
+      unsigned int sads[4];
+      const uint8_t *const positions[4] = { best_address - in_what->stride,
+                                            best_address - 1, best_address + 1,
+                                            best_address + in_what->stride };
+
+      fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
+
+      for (j = 0; j < 4; ++j) {
+        if (sads[j] < best_sad) {
+          const MV mv = { ref_mv->row + neighbors[j].row,
+                          ref_mv->col + neighbors[j].col };
+          sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
+          if (sads[j] < best_sad) {
+            best_sad = sads[j];
+            best_site = j;
+          }
+        }
+      }
+    } else {
+      for (j = 0; j < 4; ++j) {
+        const MV mv = { ref_mv->row + neighbors[j].row,
+                        ref_mv->col + neighbors[j].col };
+
+        if (is_mv_in(x, &mv)) {
+          unsigned int sad =
+              fn_ptr->sdf(what->buf, what->stride,
+                          get_buf_from_mv(in_what, &mv), in_what->stride);
+          if (sad < best_sad) {
+            sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              best_site = j;
+            }
+          }
+        }
+      }
+    }
+
+    if (best_site == -1) {
+      break;
+    } else {
+      x->second_best_mv.as_mv = *ref_mv;
+      ref_mv->row += neighbors[best_site].row;
+      ref_mv->col += neighbors[best_site].col;
+      best_address = get_buf_from_mv(in_what, ref_mv);
+    }
+  }
+
+  return best_sad;
+}
+
+// This function is called when we do joint motion search in comp_inter_inter
+// mode.
+int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
+                              int search_range,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv, const uint8_t *second_pred) {
+  const MV neighbors[8] = { { -1, 0 },  { 0, -1 }, { 0, 1 },  { 1, 0 },
+                            { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[0];
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  MV *best_mv = &x->best_mv.as_mv;
+  unsigned int best_sad =
+      fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+                   in_what->stride, second_pred) +
+      mvsad_err_cost(x, best_mv, &fcenter_mv, error_per_bit);
+  int i, j;
+
+  for (i = 0; i < search_range; ++i) {
+    int best_site = -1;
+
+    for (j = 0; j < 8; ++j) {
+      const MV mv = { best_mv->row + neighbors[j].row,
+                      best_mv->col + neighbors[j].col };
+
+      if (is_mv_in(x, &mv)) {
+        unsigned int sad =
+            fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                         in_what->stride, second_pred);
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            best_site = j;
+          }
+        }
+      }
+    }
+
+    if (best_site == -1) {
+      break;
+    } else {
+      best_mv->row += neighbors[best_site].row;
+      best_mv->col += neighbors[best_site].col;
+    }
+  }
+  return best_sad;
+}
+
+#define MIN_EX_SEARCH_LIMIT 128
+static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
+  const SPEED_FEATURES *const sf = &cpi->sf;
+  const int max_ex =
+      VPXMAX(MIN_EX_SEARCH_LIMIT,
+             (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
+
+  return sf->allow_exhaustive_searches &&
+         (sf->exhaustive_searches_thresh < INT_MAX) &&
+         (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
+}
+
+int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                           MV *mvp_full, int step_param, int error_per_bit,
+                           int *cost_list, const MV *ref_mv, int var_max,
+                           int rd) {
+  const SPEED_FEATURES *const sf = &cpi->sf;
+  const SEARCH_METHODS method = sf->mv.search_method;
+  vpx_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
+  int var = 0;
+
+  if (cost_list) {
+    cost_list[0] = INT_MAX;
+    cost_list[1] = INT_MAX;
+    cost_list[2] = INT_MAX;
+    cost_list[3] = INT_MAX;
+    cost_list[4] = INT_MAX;
+  }
+
+  // Keep track of number of searches (this frame in this thread).
+  ++(*x->m_search_count_ptr);
+
+  switch (method) {
+    case FAST_DIAMOND:
+      var = fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
+                            cost_list, fn_ptr, 1, ref_mv);
+      break;
+    case FAST_HEX:
+      var = fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
+                            cost_list, fn_ptr, 1, ref_mv);
+      break;
+    case HEX:
+      var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
+                            cost_list, fn_ptr, 1, ref_mv);
+      break;
+    case SQUARE:
+      var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+                          fn_ptr, 1, ref_mv);
+      break;
+    case BIGDIA:
+      var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1, cost_list,
+                          fn_ptr, 1, ref_mv);
+      break;
+    case NSTEP:
+      var = full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
+                               MAX_MVSEARCH_STEPS - 1 - step_param, 1,
+                               cost_list, fn_ptr, ref_mv);
+
+      // Should we allow a follow on exhaustive search?
+      if (is_exhaustive_allowed(cpi, x)) {
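+        // The threshold is specified for 64x64 blocks and is scaled down in
+        // proportion to the block area below.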
+        int64_t exhaustive_thr = sf->exhaustive_searches_thresh;
+        exhaustive_thr >>=
+            8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
+
+        // Threshold variance for an exhaustive full search.
+        if (var > exhaustive_thr) {
+          int var_ex;
+          MV tmp_mv_ex;
+          var_ex =
+              full_pixel_exhaustive(cpi, x, &x->best_mv.as_mv, error_per_bit,
+                                    cost_list, fn_ptr, ref_mv, &tmp_mv_ex);
+
+          if (var_ex < var) {
+            var = var_ex;
+            x->best_mv.as_mv = tmp_mv_ex;
+          }
+        }
+      }
+      break;
+    default: assert(0 && "Invalid search method.");
+  }
+
+  if (method != NSTEP && rd && var < var_max)
+    var = vp10_get_mvpred_var(x, &x->best_mv.as_mv, ref_mv, fn_ptr, 1);
+
+  return var;
+}
+
+#if CONFIG_EXT_INTER
+/* returns subpixel variance error function */
+#define DIST(r, c)                                                         \
+  vfp->msvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, src_stride, \
+            mask, mask_stride, &sse)
+
+/* estimated rate cost of coding an mv at (r, c), converted to distortion
+   units via error_per_bit (rounds as in mv_err_cost) */
+
+#define MVC(r, c)                                                         \
+  (mvcost                                                                 \
+       ? ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + mvcost[0][((r)-rr)] + \
+           mvcost[1][((c)-rc)]) *                                         \
+              error_per_bit +                                             \
+          4096) >>                                                        \
+             13                                                           \
+       : 0)
+
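+/* checks whether (r, c) has a better score than the previous best */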
+#define CHECK_BETTER(v, r, c)                             \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+    thismse = (DIST(r, c));                               \
+    if ((v = MVC(r, c) + thismse) < besterr) {            \
+      besterr = v;                                        \
+      br = r;                                             \
+      bc = c;                                             \
+      *distortion = thismse;                              \
+      *sse1 = sse;                                        \
+    }                                                     \
+  } else {                                                \
+    v = INT_MAX;                                          \
+  }
+
+#undef CHECK_BETTER0
+#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
+
+#undef CHECK_BETTER1
+#define CHECK_BETTER1(v, r, c)                                                 \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                      \
+    thismse = upsampled_masked_pref_error(xd, mask, mask_stride, vfp, z,       \
+                                          src_stride, upre(y, y_stride, r, c), \
+                                          y_stride, w, h, &sse);               \
+    if ((v = MVC(r, c) + thismse) < besterr) {                                 \
+      besterr = v;                                                             \
+      br = r;                                                                  \
+      bc = c;                                                                  \
+      *distortion = thismse;                                                   \
+      *sse1 = sse;                                                             \
+    }                                                                          \
+  } else {                                                                     \
+    v = INT_MAX;                                                               \
+  }
+
+int vp10_find_best_masked_sub_pixel_tree(
+    const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
+    const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
+    int is_second) {
+  const uint8_t *const z = x->plane[0].src.buf;
+  const int src_stride = x->plane[0].src.stride;
+  const MACROBLOCKD *xd = &x->e_mbd;
+  unsigned int besterr = INT_MAX;
+  unsigned int sse;
+  int thismse;
+  unsigned int whichdir;
+  unsigned int halfiters = iters_per_step;
+  unsigned int quarteriters = iters_per_step;
+  unsigned int eighthiters = iters_per_step;
+
+  const int y_stride = xd->plane[0].pre[is_second].stride;
+  const int offset = bestmv->row * y_stride + bestmv->col;
+  const uint8_t *const y = xd->plane[0].pre[is_second].buf;
+
+  int rr = ref_mv->row;
+  int rc = ref_mv->col;
+  int br = bestmv->row * 8;
+  int bc = bestmv->col * 8;
+  int hstep = 4;
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+
+  int tr = br;
+  int tc = bc;
+
+  // central mv
+  bestmv->row *= 8;
+  bestmv->col *= 8;
+
+  // calculate central point error
+  besterr =
+      vfp->mvf(y + offset, y_stride, z, src_stride, mask, mask_stride, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+
+  // 1/2 pel
+  FIRST_LEVEL_CHECKS;
+  if (halfiters > 1) {
+    SECOND_LEVEL_CHECKS;
+  }
+  tr = br;
+  tc = bc;
+
+  // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only
+  if (forced_stop != 2) {
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (quarteriters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+    tr = br;
+    tc = bc;
+  }
+
+  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+    hstep >>= 1;
+    FIRST_LEVEL_CHECKS;
+    if (eighthiters > 1) {
+      SECOND_LEVEL_CHECKS;
+    }
+    tr = br;
+    tc = bc;
+  }
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
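+// Masked variance at the starting position plus the mv rate cost.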
+static unsigned int setup_masked_center_error(
+    const uint8_t *mask, int mask_stride, const MV *bestmv, const MV *ref_mv,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const uint8_t *const src, const int src_stride, const uint8_t *const y,
+    int y_stride, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
+    int *distortion) {
+  unsigned int besterr;
+  besterr =
+      vfp->mvf(y + offset, y_stride, src, src_stride, mask, mask_stride, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  return besterr;
+}
+
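+// Masked prediction error computed on the 8x upsampled reference.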
+static int upsampled_masked_pref_error(const MACROBLOCKD *xd,
+                                       const uint8_t *mask, int mask_stride,
+                                       const vpx_variance_fn_ptr_t *vfp,
+                                       const uint8_t *const src,
+                                       const int src_stride,
+                                       const uint8_t *const y, int y_stride,
+                                       int w, int h, unsigned int *sse) {
+  unsigned int besterr;
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
+    vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+
+    besterr = vfp->mvf(CONVERT_TO_BYTEPTR(pred16), w, src, src_stride, mask,
+                       mask_stride, sse);
+  } else {
+    DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+#else
+  DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+  (void)xd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    vpx_upsampled_pred(pred, w, h, y, y_stride);
+
+    besterr = vfp->mvf(pred, w, src, src_stride, mask, mask_stride, sse);
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif
+  return besterr;
+}
+
+static unsigned int upsampled_setup_masked_center_error(
+    const MACROBLOCKD *xd, const uint8_t *mask, int mask_stride,
+    const MV *bestmv, const MV *ref_mv, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, const uint8_t *const src,
+    const int src_stride, const uint8_t *const y, int y_stride, int w, int h,
+    int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
+    int *distortion) {
+  unsigned int besterr =
+      upsampled_masked_pref_error(xd, mask, mask_stride, vfp, src, src_stride,
+                                  y + offset, y_stride, w, h, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  return besterr;
+}
+
+int vp10_find_best_masked_sub_pixel_tree_up(
+    VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+    int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, int is_second, int use_upsampled_ref) {
+  const uint8_t *const z = x->plane[0].src.buf;
+  const uint8_t *const src_address = z;
+  const int src_stride = x->plane[0].src.stride;
+  MACROBLOCKD *xd = &x->e_mbd;
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  unsigned int besterr = INT_MAX;
+  unsigned int sse;
+  unsigned int thismse;
+
+  int rr = ref_mv->row;
+  int rc = ref_mv->col;
+  int br = bestmv->row * 8;
+  int bc = bestmv->col * 8;
+  int hstep = 4;
+  int iter;
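+  // Up to three precision rounds (half, quarter, eighth pel); forced_stop
+  // trims the later rounds.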
+  int round = 3 - forced_stop;
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  int tr = br;
+  int tc = bc;
+  const MV *search_step = search_step_table;
+  int idx, best_idx = -1;
+  unsigned int cost_array[5];
+  int kr, kc;
+  const int w = 4 * num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  const int h = 4 * num_4x4_blocks_high_lookup[mbmi->sb_type];
+  int offset;
+  int y_stride;
+  const uint8_t *y;
+
+  const struct buf_2d backup_pred = pd->pre[is_second];
+  if (use_upsampled_ref) {
+    int ref = xd->mi[0]->mbmi.ref_frame[is_second];
+    const YV12_BUFFER_CONFIG *upsampled_ref = get_upsampled_ref(cpi, ref);
+    setup_pred_plane(&pd->pre[is_second], upsampled_ref->y_buffer,
+                     upsampled_ref->y_crop_width, upsampled_ref->y_crop_height,
+                     upsampled_ref->y_stride, (mi_row << 3), (mi_col << 3),
+                     NULL, pd->subsampling_x, pd->subsampling_y);
+  }
+  y = pd->pre[is_second].buf;
+  y_stride = pd->pre[is_second].stride;
+  offset = bestmv->row * y_stride + bestmv->col;
+
+  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+    if (round == 3) round = 2;
+
+  bestmv->row *= 8;
+  bestmv->col *= 8;
+
+  // Compute the starting-position error, against the upsampled reference
+  // when use_upsampled_ref is set.
+  if (use_upsampled_ref)
+    besterr = upsampled_setup_masked_center_error(
+        xd, mask, mask_stride, bestmv, ref_mv, error_per_bit, vfp, z,
+        src_stride, y, y_stride, w, h, (offset << 3), mvjcost, mvcost, sse1,
+        distortion);
+  else
+    besterr = setup_masked_center_error(
+        mask, mask_stride, bestmv, ref_mv, error_per_bit, vfp, z, src_stride, y,
+        y_stride, offset, mvjcost, mvcost, sse1, distortion);
+
+  for (iter = 0; iter < round; ++iter) {
+    // Check vertical and horizontal sub-pixel positions.
+    for (idx = 0; idx < 4; ++idx) {
+      tr = br + search_step[idx].row;
+      tc = bc + search_step[idx].col;
+      if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+        MV this_mv = { tr, tc };
+
+        if (use_upsampled_ref) {
+          const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+          thismse = upsampled_masked_pref_error(
+              xd, mask, mask_stride, vfp, src_address, src_stride, pre_address,
+              y_stride, w, h, &sse);
+        } else {
+          const uint8_t *const pre_address =
+              y + (tr >> 3) * y_stride + (tc >> 3);
+          thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+                              src_address, src_stride, mask, mask_stride, &sse);
+        }
+
+        cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+                                                mvcost, error_per_bit);
+
+        if (cost_array[idx] < besterr) {
+          best_idx = idx;
+          besterr = cost_array[idx];
+          *distortion = thismse;
+          *sse1 = sse;
+        }
+      } else {
+        cost_array[idx] = INT_MAX;
+      }
+    }
+
+    // Check diagonal sub-pixel position
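+    // Step towards whichever horizontal and vertical neighbours were cheaper.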
+    kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep);
+    kr = (cost_array[2] <= cost_array[3] ? -hstep : hstep);
+
+    tc = bc + kc;
+    tr = br + kr;
+    if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+      MV this_mv = { tr, tc };
+
+      if (use_upsampled_ref) {
+        const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+        thismse = upsampled_masked_pref_error(
+            xd, mask, mask_stride, vfp, src_address, src_stride, pre_address,
+            y_stride, w, h, &sse);
+      } else {
+        const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
+
+        thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                            src_stride, mask, mask_stride, &sse);
+      }
+
+      cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+                                            error_per_bit);
+
+      if (cost_array[4] < besterr) {
+        best_idx = 4;
+        besterr = cost_array[4];
+        *distortion = thismse;
+        *sse1 = sse;
+      }
+    } else {
+      cost_array[4] = INT_MAX;
+    }
+
+    if (best_idx < 4 && best_idx >= 0) {
+      br += search_step[best_idx].row;
+      bc += search_step[best_idx].col;
+    } else if (best_idx == 4) {
+      br = tr;
+      bc = tc;
+    }
+
+    if (iters_per_step > 1 && best_idx != -1) {
+      if (use_upsampled_ref) {
+        SECOND_LEVEL_CHECKS_BEST(1);
+      } else {
+        SECOND_LEVEL_CHECKS_BEST(0);
+      }
+    }
+
+    tr = br;
+    tc = bc;
+
+    search_step += 4;
+    hstep >>= 1;
+    best_idx = -1;
+  }
+
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if (use_upsampled_ref) {
+    pd->pre[is_second] = backup_pred;
+  }
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
+#undef DIST
+#undef MVC
+#undef CHECK_BETTER
+
+static int get_masked_mvpred_var(const MACROBLOCK *x, const uint8_t *mask,
+                                 int mask_stride, const MV *best_mv,
+                                 const MV *center_mv,
+                                 const vpx_variance_fn_ptr_t *vfp,
+                                 int use_mvcost, int is_second) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+  unsigned int unused;
+
+  return vfp->mvf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+                  in_what->stride, mask, mask_stride, &unused) +
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
+}
+
+int masked_refining_search_sad(const MACROBLOCK *x, const uint8_t *mask,
+                               int mask_stride, MV *ref_mv, int error_per_bit,
+                               int search_range,
+                               const vpx_variance_fn_ptr_t *fn_ptr,
+                               const MV *center_mv, int is_second) {
+  const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad =
+      fn_ptr->msdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+                   in_what->stride, mask, mask_stride) +
+      mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
+  int i, j;
+
+  for (i = 0; i < search_range; i++) {
+    int best_site = -1;
+
+    for (j = 0; j < 4; j++) {
+      const MV mv = { ref_mv->row + neighbors[j].row,
+                      ref_mv->col + neighbors[j].col };
+      if (is_mv_in(x, &mv)) {
+        unsigned int sad =
+            fn_ptr->msdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+                         in_what->stride, mask, mask_stride);
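+        // Early-out: the raw SAD is a lower bound on SAD + MV rate (the rate
+        // term is non-negative), so the cost is only completed when the
+        // candidate can still beat best_sad.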
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            best_site = j;
+          }
+        }
+      }
+    }
+
+    if (best_site == -1) {
+      break;
+    } else {
+      ref_mv->row += neighbors[best_site].row;
+      ref_mv->col += neighbors[best_site].col;
+    }
+  }
+  return best_sad;
+}
+
+int masked_diamond_search_sad(const MACROBLOCK *x,
+                              const search_site_config *cfg,
+                              const uint8_t *mask, int mask_stride, MV *ref_mv,
+                              MV *best_mv, int search_param, int sad_per_bit,
+                              int *num00, const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv, int is_second) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const what = &x->plane[0].src;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  // search_param determines the length of the initial step and hence the
+  // number of iterations:
+  //   0 = initial step (MAX_FIRST_STEP) pel,
+  //   1 = (MAX_FIRST_STEP/2) pel,
+  //   2 = (MAX_FIRST_STEP/4) pel, etc.
+  const search_site *const ss = &cfg->ss[search_param * cfg->searches_per_step];
+  const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param;
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  const uint8_t *best_address, *in_what_ref;
+  int best_sad = INT_MAX;
+  int best_site = 0;
+  int last_site = 0;
+  int i, j, step;
+
+  clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+  in_what_ref = get_buf_from_mv(in_what, ref_mv);
+  best_address = in_what_ref;
+  *num00 = 0;
+  *best_mv = *ref_mv;
+
+  // Check the starting position
+  best_sad = fn_ptr->msdf(what->buf, what->stride, best_address,
+                          in_what->stride, mask, mask_stride) +
+             mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+
+  i = 1;
+
+  for (step = 0; step < tot_steps; step++) {
+    for (j = 0; j < cfg->searches_per_step; j++) {
+      const MV mv = { best_mv->row + ss[i].mv.row,
+                      best_mv->col + ss[i].mv.col };
+      if (is_mv_in(x, &mv)) {
+        int sad =
+            fn_ptr->msdf(what->buf, what->stride, best_address + ss[i].offset,
+                         in_what->stride, mask, mask_stride);
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            best_site = i;
+          }
+        }
+      }
+
+      i++;
+    }
+
+    if (best_site != last_site) {
+      best_mv->row += ss[best_site].mv.row;
+      best_mv->col += ss[best_site].mv.col;
+      best_address += ss[best_site].offset;
+      last_site = best_site;
+#if defined(NEW_DIAMOND_SEARCH)
+      while (1) {
+        const MV this_mv = { best_mv->row + ss[best_site].mv.row,
+                             best_mv->col + ss[best_site].mv.col };
+        if (is_mv_in(x, &this_mv)) {
+          int sad = fn_ptr->msdf(what->buf, what->stride,
+                                 best_address + ss[best_site].offset,
+                                 in_what->stride, mask, mask_stride);
+          if (sad < best_sad) {
+            sad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              best_mv->row += ss[best_site].mv.row;
+              best_mv->col += ss[best_site].mv.col;
+              best_address += ss[best_site].offset;
+              continue;
+            }
+          }
+        }
+        break;
+      }
+#endif
+    } else if (best_address == in_what_ref) {
+      (*num00)++;
+    }
+  }
+  return best_sad;
+}
+
+int vp10_masked_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
+                                   const uint8_t *mask, int mask_stride,
+                                   MV *mvp_full, int step_param, int sadpb,
+                                   int further_steps, int do_refine,
+                                   const vpx_variance_fn_ptr_t *fn_ptr,
+                                   const MV *ref_mv, MV *dst_mv,
+                                   int is_second) {
+  MV temp_mv;
+  int thissme, n, num00 = 0;
+  int bestsme = masked_diamond_search_sad(x, &cpi->ss_cfg, mask, mask_stride,
+                                          mvp_full, &temp_mv, step_param, sadpb,
+                                          &n, fn_ptr, ref_mv, is_second);
+  if (bestsme < INT_MAX)
+    bestsme = get_masked_mvpred_var(x, mask, mask_stride, &temp_mv, ref_mv,
+                                    fn_ptr, 1, is_second);
+  *dst_mv = temp_mv;
+
+  // If the initial diamond search already exhausted the further steps, skip
+  // the refining search.
+  if (n > further_steps) do_refine = 0;
+
+  while (n < further_steps) {
+    ++n;
+
+    if (num00) {
+      num00--;
+    } else {
+      thissme = masked_diamond_search_sad(
+          x, &cpi->ss_cfg, mask, mask_stride, mvp_full, &temp_mv,
+          step_param + n, sadpb, &num00, fn_ptr, ref_mv, is_second);
+      if (thissme < INT_MAX)
+        thissme = get_masked_mvpred_var(x, mask, mask_stride, &temp_mv, ref_mv,
+                                        fn_ptr, 1, is_second);
+
+      // Skip the refining search if the zero-motion steps already cover the
+      // remaining further steps.
+      if (num00 > further_steps - n) do_refine = 0;
+
+      if (thissme < bestsme) {
+        bestsme = thissme;
+        *dst_mv = temp_mv;
+      }
+    }
+  }
+
+  // final 1-away diamond refining search
+  if (do_refine) {
+    const int search_range = 8;
+    MV best_mv = *dst_mv;
+    thissme =
+        masked_refining_search_sad(x, mask, mask_stride, &best_mv, sadpb,
+                                   search_range, fn_ptr, ref_mv, is_second);
+    if (thissme < INT_MAX)
+      thissme = get_masked_mvpred_var(x, mask, mask_stride, &best_mv, ref_mv,
+                                      fn_ptr, 1, is_second);
+    if (thissme < bestsme) {
+      bestsme = thissme;
+      *dst_mv = best_mv;
+    }
+  }
+  return bestsme;
+}
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC
+/* returns subpixel variance error function */
+#define DIST(r, c) \
+  vfp->osvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, mask, &sse)
+
+/* checks if (r, c) has better score than previous best */
+#define MVC(r, c)                                                         \
+  (mvcost                                                                 \
+       ? ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + mvcost[0][((r)-rr)] + \
+           mvcost[1][((c)-rc)]) *                                         \
+              error_per_bit +                                             \
+          4096) >>                                                        \
+             13                                                           \
+       : 0)
+
+#define CHECK_BETTER(v, r, c)                             \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+    thismse = (DIST(r, c));                               \
+    if ((v = MVC(r, c) + thismse) < besterr) {            \
+      besterr = v;                                        \
+      br = r;                                             \
+      bc = c;                                             \
+      *distortion = thismse;                              \
+      *sse1 = sse;                                        \
+    }                                                     \
+  } else {                                                \
+    v = INT_MAX;                                          \
+  }
+
+#undef CHECK_BETTER0
+#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
+
+#undef CHECK_BETTER1
+#define CHECK_BETTER1(v, r, c)                                            \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {                 \
+    thismse = upsampled_obmc_pref_error(                                  \
+        xd, mask, vfp, z, upre(y, y_stride, r, c), y_stride, w, h, &sse); \
+    if ((v = MVC(r, c) + thismse) < besterr) {                            \
+      besterr = v;                                                        \
+      br = r;                                                             \
+      bc = c;                                                             \
+      *distortion = thismse;                                              \
+      *sse1 = sse;                                                        \
+    }                                                                     \
+  } else {                                                                \
+    v = INT_MAX;                                                          \
+  }
+
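+// For the OBMC variants the "source" is a 32-bit weighted-source buffer
+// (wsrc) accompanied by a per-pixel weight mask; the vfp->ovf/osvf/osdf
+// kernels compare a predictor against this weighted source rather than
+// against raw source pixels.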
+static unsigned int setup_obmc_center_error(
+    const int32_t *mask, const MV *bestmv, const MV *ref_mv, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, const int32_t *const wsrc,
+    const uint8_t *const y, int y_stride, int offset, int *mvjcost,
+    int *mvcost[2], unsigned int *sse1, int *distortion) {
+  unsigned int besterr;
+  besterr = vfp->ovf(y + offset, y_stride, wsrc, mask, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  return besterr;
+}
+
+static int upsampled_obmc_pref_error(const MACROBLOCKD *xd, const int32_t *mask,
+                                     const vpx_variance_fn_ptr_t *vfp,
+                                     const int32_t *const wsrc,
+                                     const uint8_t *const y, int y_stride,
+                                     int w, int h, unsigned int *sse) {
+  unsigned int besterr;
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    DECLARE_ALIGNED(16, uint16_t, pred16[MAX_SB_SQUARE]);
+    vpx_highbd_upsampled_pred(pred16, w, h, y, y_stride);
+
+    besterr = vfp->ovf(CONVERT_TO_BYTEPTR(pred16), w, wsrc, mask, sse);
+  } else {
+    DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+#else
+  DECLARE_ALIGNED(16, uint8_t, pred[MAX_SB_SQUARE]);
+  (void)xd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    vpx_upsampled_pred(pred, w, h, y, y_stride);
+
+    besterr = vfp->ovf(pred, w, wsrc, mask, sse);
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif
+  return besterr;
+}
+
+static unsigned int upsampled_setup_obmc_center_error(
+    const MACROBLOCKD *xd, const int32_t *mask, const MV *bestmv,
+    const MV *ref_mv, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    const int32_t *const wsrc, const uint8_t *const y, int y_stride, int w,
+    int h, int offset, int *mvjcost, int *mvcost[2], unsigned int *sse1,
+    int *distortion) {
+  unsigned int besterr = upsampled_obmc_pref_error(
+      xd, mask, vfp, wsrc, y + offset, y_stride, w, h, sse1);
+  *distortion = besterr;
+  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  return besterr;
+}
+
+int vp10_find_best_obmc_sub_pixel_tree_up(
+    VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc, const int32_t *mask,
+    int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, int is_second, int use_upsampled_ref) {
+  const int *const z = wsrc;
+  const int *const src_address = z;
+  MACROBLOCKD *xd = &x->e_mbd;
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  unsigned int besterr = INT_MAX;
+  unsigned int sse;
+  unsigned int thismse;
+
+  int rr = ref_mv->row;
+  int rc = ref_mv->col;
+  int br = bestmv->row * 8;
+  int bc = bestmv->col * 8;
+  int hstep = 4;
+  int iter;
+  int round = 3 - forced_stop;
+  const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX);
+  const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX);
+  const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX);
+  const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX);
+  int tr = br;
+  int tc = bc;
+  const MV *search_step = search_step_table;
+  int idx, best_idx = -1;
+  unsigned int cost_array[5];
+  int kr, kc;
+  const int w = 4 * num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  const int h = 4 * num_4x4_blocks_high_lookup[mbmi->sb_type];
+  int offset;
+  int y_stride;
+  const uint8_t *y;
+
+  const struct buf_2d backup_pred = pd->pre[is_second];
+  if (use_upsampled_ref) {
+    int ref = xd->mi[0]->mbmi.ref_frame[is_second];
+    const YV12_BUFFER_CONFIG *upsampled_ref = get_upsampled_ref(cpi, ref);
+    setup_pred_plane(&pd->pre[is_second], upsampled_ref->y_buffer,
+                     upsampled_ref->y_crop_width, upsampled_ref->y_crop_height,
+                     upsampled_ref->y_stride, (mi_row << 3), (mi_col << 3),
+                     NULL, pd->subsampling_x, pd->subsampling_y);
+  }
+  y = pd->pre[is_second].buf;
+  y_stride = pd->pre[is_second].stride;
+  offset = bestmv->row * y_stride + bestmv->col;
+
+  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+    if (round == 3) round = 2;
+
+  bestmv->row *= 8;
+  bestmv->col *= 8;
+  // use_upsampled_ref can be 0 or 1
+  if (use_upsampled_ref)
+    besterr = upsampled_setup_obmc_center_error(
+        xd, mask, bestmv, ref_mv, error_per_bit, vfp, z, y, y_stride, w, h,
+        (offset << 3), mvjcost, mvcost, sse1, distortion);
+  else
+    besterr = setup_obmc_center_error(mask, bestmv, ref_mv, error_per_bit, vfp,
+                                      z, y, y_stride, offset, mvjcost, mvcost,
+                                      sse1, distortion);
+
+  for (iter = 0; iter < round; ++iter) {
+    // Check vertical and horizontal sub-pixel positions.
+    for (idx = 0; idx < 4; ++idx) {
+      tr = br + search_step[idx].row;
+      tc = bc + search_step[idx].col;
+      if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+        MV this_mv = { tr, tc };
+
+        if (use_upsampled_ref) {
+          const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+          thismse = upsampled_obmc_pref_error(
+              xd, mask, vfp, src_address, pre_address, y_stride, w, h, &sse);
+        } else {
+          const uint8_t *const pre_address =
+              y + (tr >> 3) * y_stride + (tc >> 3);
+          thismse = vfp->osvf(pre_address, y_stride, sp(tc), sp(tr),
+                              src_address, mask, &sse);
+        }
+
+        cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+                                                mvcost, error_per_bit);
+        if (cost_array[idx] < besterr) {
+          best_idx = idx;
+          besterr = cost_array[idx];
+          *distortion = thismse;
+          *sse1 = sse;
+        }
+      } else {
+        cost_array[idx] = INT_MAX;
+      }
+    }
+
+    // Check diagonal sub-pixel position
+    kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep);
+    kr = (cost_array[2] <= cost_array[3] ? -hstep : hstep);
+
+    tc = bc + kc;
+    tr = br + kr;
+    if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
+      MV this_mv = { tr, tc };
+
+      if (use_upsampled_ref) {
+        const uint8_t *const pre_address = y + tr * y_stride + tc;
+
+        thismse = upsampled_obmc_pref_error(xd, mask, vfp, src_address,
+                                            pre_address, y_stride, w, h, &sse);
+      } else {
+        const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
+
+        thismse = vfp->osvf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+                            mask, &sse);
+      }
+
+      cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+                                            error_per_bit);
+
+      if (cost_array[4] < besterr) {
+        best_idx = 4;
+        besterr = cost_array[4];
+        *distortion = thismse;
+        *sse1 = sse;
+      }
+    } else {
+      cost_array[4] = INT_MAX;
+    }
+
+    if (best_idx < 4 && best_idx >= 0) {
+      br += search_step[best_idx].row;
+      bc += search_step[best_idx].col;
+    } else if (best_idx == 4) {
+      br = tr;
+      bc = tc;
+    }
+
+    if (iters_per_step > 1 && best_idx != -1) {
+      if (use_upsampled_ref) {
+        SECOND_LEVEL_CHECKS_BEST(1);
+      } else {
+        SECOND_LEVEL_CHECKS_BEST(0);
+      }
+    }
+
+    tr = br;
+    tc = bc;
+
+    search_step += 4;
+    hstep >>= 1;
+    best_idx = -1;
+  }
+
+  // These lines ensure static analysis doesn't warn that
+  // tr and tc aren't used after the above point.
+  (void)tr;
+  (void)tc;
+
+  bestmv->row = br;
+  bestmv->col = bc;
+
+  if (use_upsampled_ref) {
+    pd->pre[is_second] = backup_pred;
+  }
+
+  if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) ||
+      (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3)))
+    return INT_MAX;
+
+  return besterr;
+}
+
+#undef DIST
+#undef MVC
+#undef CHECK_BETTER
+
+static int get_obmc_mvpred_var(const MACROBLOCK *x, const int32_t *wsrc,
+                               const int32_t *mask, const MV *best_mv,
+                               const MV *center_mv,
+                               const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                               int is_second) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  const MV mv = { best_mv->row * 8, best_mv->col * 8 };
+  unsigned int unused;
+
+  return vfp->ovf(get_buf_from_mv(in_what, best_mv), in_what->stride, wsrc,
+                  mask, &unused) +
+         (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+                                   x->errorperbit)
+                     : 0);
+}
+
+int obmc_refining_search_sad(const MACROBLOCK *x, const int32_t *wsrc,
+                             const int32_t *mask, MV *ref_mv, int error_per_bit,
+                             int search_range,
+                             const vpx_variance_fn_ptr_t *fn_ptr,
+                             const MV *center_mv, int is_second) {
+  const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  unsigned int best_sad = fn_ptr->osdf(get_buf_from_mv(in_what, ref_mv),
+                                       in_what->stride, wsrc, mask) +
+                          mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
+  int i, j;
+
+  for (i = 0; i < search_range; i++) {
+    int best_site = -1;
+
+    for (j = 0; j < 4; j++) {
+      const MV mv = { ref_mv->row + neighbors[j].row,
+                      ref_mv->col + neighbors[j].col };
+      if (is_mv_in(x, &mv)) {
+        unsigned int sad = fn_ptr->osdf(get_buf_from_mv(in_what, &mv),
+                                        in_what->stride, wsrc, mask);
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            best_site = j;
+          }
+        }
+      }
+    }
+
+    if (best_site == -1) {
+      break;
+    } else {
+      ref_mv->row += neighbors[best_site].row;
+      ref_mv->col += neighbors[best_site].col;
+    }
+  }
+  return best_sad;
+}
+
+int obmc_diamond_search_sad(const MACROBLOCK *x, const search_site_config *cfg,
+                            const int32_t *wsrc, const int32_t *mask,
+                            MV *ref_mv, MV *best_mv, int search_param,
+                            int sad_per_bit, int *num00,
+                            const vpx_variance_fn_ptr_t *fn_ptr,
+                            const MV *center_mv, int is_second) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const in_what = &xd->plane[0].pre[is_second];
+  // search_param determines the length of the initial step and hence the
+  // number of iterations:
+  //   0 = initial step (MAX_FIRST_STEP) pel,
+  //   1 = (MAX_FIRST_STEP/2) pel,
+  //   2 = (MAX_FIRST_STEP/4) pel, etc.
+  const search_site *const ss = &cfg->ss[search_param * cfg->searches_per_step];
+  const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param;
+  const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+  const uint8_t *best_address, *in_what_ref;
+  int best_sad = INT_MAX;
+  int best_site = 0;
+  int last_site = 0;
+  int i, j, step;
+
+  clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+  in_what_ref = in_what->buf + ref_mv->row * in_what->stride + ref_mv->col;
+  best_address = in_what_ref;
+  *num00 = 0;
+  *best_mv = *ref_mv;
+
+  // Check the starting position
+  best_sad = fn_ptr->osdf(best_address, in_what->stride, wsrc, mask) +
+             mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+
+  i = 1;
+
+  for (step = 0; step < tot_steps; step++) {
+    for (j = 0; j < cfg->searches_per_step; j++) {
+      const MV mv = { best_mv->row + ss[i].mv.row,
+                      best_mv->col + ss[i].mv.col };
+      if (is_mv_in(x, &mv)) {
+        int sad = fn_ptr->osdf(best_address + ss[i].offset, in_what->stride,
+                               wsrc, mask);
+        if (sad < best_sad) {
+          sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+          if (sad < best_sad) {
+            best_sad = sad;
+            best_site = i;
+          }
+        }
+      }
+
+      i++;
+    }
+
+    if (best_site != last_site) {
+      best_mv->row += ss[best_site].mv.row;
+      best_mv->col += ss[best_site].mv.col;
+      best_address += ss[best_site].offset;
+      last_site = best_site;
+#if defined(NEW_DIAMOND_SEARCH)
+      while (1) {
+        const MV this_mv = { best_mv->row + ss[best_site].mv.row,
+                             best_mv->col + ss[best_site].mv.col };
+        if (is_mv_in(x, &this_mv)) {
+          int sad = fn_ptr->osdf(best_address + ss[best_site].offset,
+                                 in_what->stride, wsrc, mask);
+          if (sad < best_sad) {
+            sad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
+            if (sad < best_sad) {
+              best_sad = sad;
+              best_mv->row += ss[best_site].mv.row;
+              best_mv->col += ss[best_site].mv.col;
+              best_address += ss[best_site].offset;
+              continue;
+            }
+          }
+        }
+        break;
+      }
+#endif
+    } else if (best_address == in_what_ref) {
+      (*num00)++;
+    }
+  }
+  return best_sad;
+}
+
+int vp10_obmc_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
+                                 const int32_t *wsrc, const int32_t *mask,
+                                 MV *mvp_full, int step_param, int sadpb,
+                                 int further_steps, int do_refine,
+                                 const vpx_variance_fn_ptr_t *fn_ptr,
+                                 const MV *ref_mv, MV *dst_mv, int is_second) {
+  MV temp_mv;
+  int thissme, n, num00 = 0;
+  int bestsme =
+      obmc_diamond_search_sad(x, &cpi->ss_cfg, wsrc, mask, mvp_full, &temp_mv,
+                              step_param, sadpb, &n, fn_ptr, ref_mv, is_second);
+  if (bestsme < INT_MAX)
+    bestsme = get_obmc_mvpred_var(x, wsrc, mask, &temp_mv, ref_mv, fn_ptr, 1,
+                                  is_second);
+  *dst_mv = temp_mv;
+
+  // If the initial diamond search already exhausted the further steps, skip
+  // the refining search.
+  if (n > further_steps) do_refine = 0;
+
+  while (n < further_steps) {
+    ++n;
+
+    if (num00) {
+      num00--;
+    } else {
+      thissme = obmc_diamond_search_sad(x, &cpi->ss_cfg, wsrc, mask, mvp_full,
+                                        &temp_mv, step_param + n, sadpb, &num00,
+                                        fn_ptr, ref_mv, is_second);
+      if (thissme < INT_MAX)
+        thissme = get_obmc_mvpred_var(x, wsrc, mask, &temp_mv, ref_mv, fn_ptr,
+                                      1, is_second);
+
+      // Skip the refining search if the zero-motion steps already cover the
+      // remaining further steps.
+      if (num00 > further_steps - n) do_refine = 0;
+
+      if (thissme < bestsme) {
+        bestsme = thissme;
+        *dst_mv = temp_mv;
+      }
+    }
+  }
+
+  // final 1-away diamond refining search
+  if (do_refine) {
+    const int search_range = 8;
+    MV best_mv = *dst_mv;
+    thissme = obmc_refining_search_sad(x, wsrc, mask, &best_mv, sadpb,
+                                       search_range, fn_ptr, ref_mv, is_second);
+    if (thissme < INT_MAX)
+      thissme = get_obmc_mvpred_var(x, wsrc, mask, &best_mv, ref_mv, fn_ptr, 1,
+                                    is_second);
+    if (thissme < bestsme) {
+      bestsme = thissme;
+      *dst_mv = best_mv;
+    }
+  }
+  return bestsme;
+}
+#endif  // CONFIG_OBMC
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
new file mode 100644
index 0000000..d26b9bd
--- /dev/null
+++ b/av1/encoder/mcomp.h
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_MCOMP_H_
+#define VP10_ENCODER_MCOMP_H_
+
+#include "av1/encoder/block.h"
+#include "aom_dsp/variance.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The maximum number of steps in a step search given the largest
+// allowed initial step
+#define MAX_MVSEARCH_STEPS 11
+// Maximum full-pel motion vector, specified in full-pixel units; this
+// enables motion vectors in the range [-1023, 1023].
+#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1)
+// Maximum size of the first step in full pel units
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
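+// With MAX_MVSEARCH_STEPS == 11 these work out to
+// MAX_FULL_PEL_VAL == (1 << 10) - 1 == 1023 and MAX_FIRST_STEP == 1024.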
+// Allowed motion vector pixel distance outside the image border
+// for BLOCK_16X16.
+#define BORDER_MV_PIXELS_B16 (16 + VPX_INTERP_EXTEND)
+
+// motion search site
+typedef struct search_site {
+  MV mv;
+  int offset;
+} search_site;
+
+typedef struct search_site_config {
+  search_site ss[8 * MAX_MVSEARCH_STEPS + 1];
+  int ss_count;
+  int searches_per_step;
+} search_site_config;
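+// Each search_site pairs an MV displacement with a precomputed buffer offset
+// (mv.row * stride + mv.col for the stride given at init time), so the
+// diamond searches can walk the reference buffer with pointer arithmetic;
+// the vp10_init*_compensation routines below fill in these tables.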
+
+void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
+void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+
+void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+                     int *mvcost[2], int weight);
+
+// Utility to compute variance + MV rate cost for a given MV
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+                        const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+                        int use_mvcost);
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+                           const MV *center_mv, const uint8_t *second_pred,
+                           const vpx_variance_fn_ptr_t *vfp, int use_mvcost);
+
+struct VP10_COMP;
+struct SPEED_FEATURES;
+
+int vp10_init_search_range(int size);
+
+int vp10_refining_search_sad(struct macroblock *x, struct mv *ref_mv,
+                             int sad_per_bit, int distance,
+                             const vpx_variance_fn_ptr_t *fn_ptr,
+                             const struct mv *center_mv);
+
+// Runs sequence of diamond searches in smaller steps for RD.
+int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
+                            MV *mvp_full, int step_param, int sadpb,
+                            int further_steps, int do_refine, int *cost_list,
+                            const vpx_variance_fn_ptr_t *fn_ptr,
+                            const MV *ref_mv, MV *dst_mv);
+
+// Perform integral projection based motion estimation.
+unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
+                                            MACROBLOCK *x, BLOCK_SIZE bsize,
+                                            int mi_row, int mi_col);
+
+int vp10_hex_search(MACROBLOCK *x, MV *start_mv, int search_param,
+                    int sad_per_bit, int do_init_search, int *cost_list,
+                    const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+                    const MV *center_mv);
+
+typedef int(fractional_mv_step_fp)(
+    MACROBLOCK *x, const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp,
+    int forced_stop,  // 0 - full, 1 - qtr only, 2 - half only
+    int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+    int h, int use_upsampled_ref);
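+// A usage sketch (hypothetical caller; besterr/dis/sse are illustrative
+// names): the encoder picks one of the implementations declared below and
+// calls it through this pointer, e.g.
+//   besterr = vp10_find_best_sub_pixel_tree(x, &ref_mv, allow_hp,
+//       x->errorperbit, vfp, 0 /* forced_stop: full */, 2 /* iters_per_step */,
+//       cost_list, x->nmvjointcost, x->mvcost, &dis, &sse,
+//       NULL /* second_pred */, w, h, 0 /* use_upsampled_ref */);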
+
+extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
+extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
+extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
+extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
+
+typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+                                     int sad_per_bit, int distance,
+                                     const vpx_variance_fn_ptr_t *fn_ptr,
+                                     const MV *center_mv, MV *best_mv);
+
+typedef int (*vp10_diamond_search_fn_t)(
+    MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
+    int search_param, int sad_per_bit, int *num00,
+    const vpx_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
+
+int vp10_refining_search_8p_c(MACROBLOCK *x, int error_per_bit,
+                              int search_range,
+                              const vpx_variance_fn_ptr_t *fn_ptr,
+                              const MV *center_mv, const uint8_t *second_pred);
+
+struct VP10_COMP;
+
+int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
+                           BLOCK_SIZE bsize, MV *mvp_full, int step_param,
+                           int error_per_bit, int *cost_list, const MV *ref_mv,
+                           int var_max, int rd);
+
+#if CONFIG_EXT_INTER
+int vp10_find_best_masked_sub_pixel_tree(
+    const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
+    const MV *ref_mv, int allow_hp, int error_per_bit,
+    const vpx_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
+    int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
+    int is_second);
+int vp10_find_best_masked_sub_pixel_tree_up(
+    struct VP10_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
+    int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
+    int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+    int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
+    unsigned int *sse1, int is_second, int use_upsampled_ref);
+int vp10_masked_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
+                                   const uint8_t *mask, int mask_stride,
+                                   MV *mvp_full, int step_param, int sadpb,
+                                   int further_steps, int do_refine,
+                                   const vpx_variance_fn_ptr_t *fn_ptr,
+                                   const MV *ref_mv, MV *dst_mv, int is_second);
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_OBMC
+int vp10_obmc_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
+                                 const int32_t *wsrc, const int32_t *mask,
+                                 MV *mvp_full, int step_param, int sadpb,
+                                 int further_steps, int do_refine,
+                                 const vpx_variance_fn_ptr_t *fn_ptr,
+                                 const MV *ref_mv, MV *dst_mv, int is_second);
+int vp10_find_best_obmc_sub_pixel_tree_up(
+    struct VP10_COMP *cpi, MACROBLOCK *x, const int32_t *wsrc,
+    const int32_t *mask, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
+    int allow_hp, int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+    int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
+    int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
+#endif  // CONFIG_OBMC
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_MCOMP_H_
diff --git a/av1/encoder/mips/msa/error_msa.c b/av1/encoder/mips/msa/error_msa.c
new file mode 100644
index 0000000..71c5ad3
--- /dev/null
+++ b/av1/encoder/mips/msa/error_msa.c
@@ -0,0 +1,107 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "aom_dsp/mips/macros_msa.h"
+
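+/* A scalar sketch of what the vectorized macro below computes, matching the
+ * semantics of the vp10_block_error_c fallback:
+ *   err  = sum_i (coeff[i] - dq_coeff[i])^2   // coding error
+ *   *ssz = sum_i  coeff[i]^2                  // energy of the source coeffs
+ */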
+#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize)                                     \
+  static int64_t block_error_##BSize##size_msa(                              \
+      const int16_t *coeff_ptr, const int16_t *dq_coeff_ptr, int64_t *ssz) { \
+    int64_t err = 0;                                                         \
+    uint32_t loop_cnt;                                                       \
+    v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h;                             \
+    v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w;                              \
+    v2i64 sq_coeff_r, sq_coeff_l;                                            \
+    v2i64 err0, err_dup0, err1, err_dup1;                                    \
+                                                                             \
+    coeff = LD_SH(coeff_ptr);                                                \
+    dq_coeff = LD_SH(dq_coeff_ptr);                                          \
+    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
+    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
+    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
+    DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, sq_coeff_r,      \
+                sq_coeff_l);                                                 \
+    DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1);                 \
+                                                                             \
+    coeff = LD_SH(coeff_ptr + 8);                                            \
+    dq_coeff = LD_SH(dq_coeff_ptr + 8);                                      \
+    UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                                \
+    ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                      \
+    HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                       \
+    DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);              \
+    DPADD_SD2_SD(diff_r, diff_l, err0, err1);                                \
+                                                                             \
+    coeff_ptr += 16;                                                         \
+    dq_coeff_ptr += 16;                                                      \
+                                                                             \
+    for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) {                       \
+      coeff = LD_SH(coeff_ptr);                                              \
+      dq_coeff = LD_SH(dq_coeff_ptr);                                        \
+      UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
+      ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
+      HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
+      DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
+      DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
+                                                                             \
+      coeff = LD_SH(coeff_ptr + 8);                                          \
+      dq_coeff = LD_SH(dq_coeff_ptr + 8);                                    \
+      UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w);                              \
+      ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h);                    \
+      HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l);                     \
+      DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l);            \
+      DPADD_SD2_SD(diff_r, diff_l, err0, err1);                              \
+                                                                             \
+      coeff_ptr += 16;                                                       \
+      dq_coeff_ptr += 16;                                                    \
+    }                                                                        \
+                                                                             \
+    err_dup0 = __msa_splati_d(sq_coeff_r, 1);                                \
+    err_dup1 = __msa_splati_d(sq_coeff_l, 1);                                \
+    sq_coeff_r += err_dup0;                                                  \
+    sq_coeff_l += err_dup1;                                                  \
+    *ssz = __msa_copy_s_d(sq_coeff_r, 0);                                    \
+    *ssz += __msa_copy_s_d(sq_coeff_l, 0);                                   \
+                                                                             \
+    err_dup0 = __msa_splati_d(err0, 1);                                      \
+    err_dup1 = __msa_splati_d(err1, 1);                                      \
+    err0 += err_dup0;                                                        \
+    err1 += err_dup1;                                                        \
+    err = __msa_copy_s_d(err0, 0);                                           \
+    err += __msa_copy_s_d(err1, 0);                                          \
+                                                                             \
+    return err;                                                              \
+  }
+
+/* clang-format off */
+BLOCK_ERROR_BLOCKSIZE_MSA(16)
+BLOCK_ERROR_BLOCKSIZE_MSA(64)
+BLOCK_ERROR_BLOCKSIZE_MSA(256)
+BLOCK_ERROR_BLOCKSIZE_MSA(1024)
+/* clang-format on */
+
+int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
+                             const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+                             int64_t *ssz) {
+  int64_t err;
+  const int16_t *coeff = (const int16_t *)coeff_ptr;
+  const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
+
+  switch (blk_size) {
+    case 16: err = block_error_16size_msa(coeff, dq_coeff, ssz); break;
+    case 64: err = block_error_64size_msa(coeff, dq_coeff, ssz); break;
+    case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
+    case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
+    default:
+      err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
+      break;
+  }
+
+  return err;
+}
diff --git a/av1/encoder/mips/msa/fdct16x16_msa.c b/av1/encoder/mips/msa/fdct16x16_msa.c
new file mode 100644
index 0000000..cda2138
--- /dev/null
+++ b/av1/encoder/mips/msa/fdct16x16_msa.c
@@ -0,0 +1,500 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "av1/common/enums.h"
+#include "av1/encoder/mips/msa/fdct_msa.h"
+#include "aom_dsp/mips/fwd_txfm_msa.h"
+
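+/* The hybrid 16x16 transforms below are built from two 1-D passes: a column
+ * pass over the input followed by a row pass, with DCT or ADST selected per
+ * direction by tx_type in vp10_fht16x16_msa at the bottom of this file. The
+ * fadst16_* helpers implement the vectorized ADST pass; the DCT pass reuses
+ * the shared MSA fwd_txfm helpers. */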
+static void fadst16_cols_step1_msa(const int16_t *input, int32_t stride,
+                                   const int32_t *const0, int16_t *int_buf) {
+  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
+  v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3;
+  v4i32 k0, k1, k2, k3;
+
+  /* load input data */
+  r0 = LD_SH(input);
+  r15 = LD_SH(input + 15 * stride);
+  r7 = LD_SH(input + 7 * stride);
+  r8 = LD_SH(input + 8 * stride);
+  SLLI_4V(r0, r15, r7, r8, 2);
+
+  /* stage 1 */
+  LD_SW2(const0, 4, k0, k1);
+  LD_SW2(const0 + 8, 4, k2, k3);
+  MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
+
+  r3 = LD_SH(input + 3 * stride);
+  r4 = LD_SH(input + 4 * stride);
+  r11 = LD_SH(input + 11 * stride);
+  r12 = LD_SH(input + 12 * stride);
+  SLLI_4V(r3, r4, r11, r12, 2);
+
+  LD_SW2(const0 + 4 * 4, 4, k0, k1);
+  LD_SW2(const0 + 4 * 6, 4, k2, k3);
+  MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
+
+  /* stage 2 */
+  BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
+  ST_SH2(tp0, tp2, int_buf, 8);
+  ST_SH2(tp1, tp3, int_buf + 4 * 8, 8);
+
+  LD_SW2(const0 + 4 * 8, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 10);
+  MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
+
+  ST_SH2(h0, h1, int_buf + 8 * 8, 8);
+  ST_SH2(h3, h2, int_buf + 12 * 8, 8);
+
+  r9 = LD_SH(input + 9 * stride);
+  r6 = LD_SH(input + 6 * stride);
+  r1 = LD_SH(input + stride);
+  r14 = LD_SH(input + 14 * stride);
+  SLLI_4V(r9, r6, r1, r14, 2);
+
+  LD_SW2(const0 + 4 * 11, 4, k0, k1);
+  LD_SW2(const0 + 4 * 13, 4, k2, k3);
+  MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
+
+  ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);
+
+  r13 = LD_SH(input + 13 * stride);
+  r2 = LD_SH(input + 2 * stride);
+  r5 = LD_SH(input + 5 * stride);
+  r10 = LD_SH(input + 10 * stride);
+  SLLI_4V(r13, r2, r5, r10, 2);
+
+  LD_SW2(const0 + 4 * 15, 4, k0, k1);
+  LD_SW2(const0 + 4 * 17, 4, k2, k3);
+  MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
+
+  ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
+
+  BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3);
+  ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8);
+}
+
+static void fadst16_cols_step2_msa(int16_t *int_buf, const int32_t *const0,
+                                   int16_t *out) {
+  int16_t *out_ptr = out + 128;
+  v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15;
+  v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11;
+  v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
+  v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
+  v4i32 k0, k1, k2, k3;
+
+  LD_SH2(int_buf + 3 * 8, 4 * 8, g13, g15);
+  LD_SH2(int_buf + 11 * 8, 4 * 8, g5, g7);
+  LD_SW2(const0 + 4 * 19, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 21);
+  MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
+
+  tp0 = LD_SH(int_buf + 4 * 8);
+  tp1 = LD_SH(int_buf + 5 * 8);
+  tp3 = LD_SH(int_buf + 10 * 8);
+  tp2 = LD_SH(int_buf + 14 * 8);
+  LD_SW2(const0 + 4 * 22, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 24);
+  MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
+  out4 = -out4;
+  ST_SH(out4, (out + 3 * 16));
+  ST_SH(out5, (out_ptr + 4 * 16));
+
+  h1 = LD_SH(int_buf + 9 * 8);
+  h3 = LD_SH(int_buf + 12 * 8);
+  MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
+  out13 = -out13;
+  ST_SH(out12, (out + 2 * 16));
+  ST_SH(out13, (out_ptr + 5 * 16));
+
+  tp0 = LD_SH(int_buf);
+  tp1 = LD_SH(int_buf + 8);
+  tp2 = LD_SH(int_buf + 2 * 8);
+  tp3 = LD_SH(int_buf + 6 * 8);
+
+  BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10);
+  out1 = -out1;
+  ST_SH(out0, (out));
+  ST_SH(out1, (out_ptr + 7 * 16));
+
+  h0 = LD_SH(int_buf + 8 * 8);
+  h2 = LD_SH(int_buf + 13 * 8);
+
+  BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
+  out8 = -out8;
+  ST_SH(out8, (out + 16));
+  ST_SH(out9, (out_ptr + 6 * 16));
+
+  /* stage 4 */
+  LD_SW2(const0 + 4 * 25, 4, k0, k1);
+  LD_SW2(const0 + 4 * 27, 4, k2, k3);
+  MADD_SHORT(h10, h11, k1, k2, out2, out3);
+  ST_SH(out2, (out + 7 * 16));
+  ST_SH(out3, (out_ptr));
+
+  MADD_SHORT(out6, out7, k0, k3, out6, out7);
+  ST_SH(out6, (out + 4 * 16));
+  ST_SH(out7, (out_ptr + 3 * 16));
+
+  MADD_SHORT(out10, out11, k0, k3, out10, out11);
+  ST_SH(out10, (out + 6 * 16));
+  ST_SH(out11, (out_ptr + 16));
+
+  MADD_SHORT(out14, out15, k1, k2, out14, out15);
+  ST_SH(out14, (out + 5 * 16));
+  ST_SH(out15, (out_ptr + 2 * 16));
+}
+
+static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) {
+  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
+  v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
+
+  /* load input data */
+  LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  FDCT_POSTPROC_2V_NEG_H(r0, r1);
+  FDCT_POSTPROC_2V_NEG_H(r2, r3);
+  FDCT_POSTPROC_2V_NEG_H(r4, r5);
+  FDCT_POSTPROC_2V_NEG_H(r6, r7);
+  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
+  out += 64;
+
+  LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
+  FDCT_POSTPROC_2V_NEG_H(r8, r9);
+  FDCT_POSTPROC_2V_NEG_H(r10, r11);
+  FDCT_POSTPROC_2V_NEG_H(r12, r13);
+  FDCT_POSTPROC_2V_NEG_H(r14, r15);
+  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
+  out += 64;
+
+  /* load input data */
+  input += 128;
+  LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  FDCT_POSTPROC_2V_NEG_H(r0, r1);
+  FDCT_POSTPROC_2V_NEG_H(r2, r3);
+  FDCT_POSTPROC_2V_NEG_H(r4, r5);
+  FDCT_POSTPROC_2V_NEG_H(r6, r7);
+  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);
+  out += 64;
+
+  LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
+  FDCT_POSTPROC_2V_NEG_H(r8, r9);
+  FDCT_POSTPROC_2V_NEG_H(r10, r11);
+  FDCT_POSTPROC_2V_NEG_H(r12, r13);
+  FDCT_POSTPROC_2V_NEG_H(r14, r15);
+  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);
+}
+
+static void fadst16_rows_step1_msa(int16_t *input, const int32_t *const0,
+                                   int16_t *int_buf) {
+  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
+  v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3;
+  v4i32 k0, k1, k2, k3;
+
+  /* load input data */
+  r0 = LD_SH(input);
+  r7 = LD_SH(input + 7 * 8);
+  r8 = LD_SH(input + 8 * 8);
+  r15 = LD_SH(input + 15 * 8);
+
+  /* stage 1 */
+  LD_SW2(const0, 4, k0, k1);
+  LD_SW2(const0 + 4 * 2, 4, k2, k3);
+  MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
+
+  r3 = LD_SH(input + 3 * 8);
+  r4 = LD_SH(input + 4 * 8);
+  r11 = LD_SH(input + 11 * 8);
+  r12 = LD_SH(input + 12 * 8);
+
+  LD_SW2(const0 + 4 * 4, 4, k0, k1);
+  LD_SW2(const0 + 4 * 6, 4, k2, k3);
+  MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
+
+  /* stage 2 */
+  BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1);
+  ST_SH2(tp0, tp1, int_buf, 4 * 8);
+  ST_SH2(tp2, tp3, int_buf + 8, 4 * 8);
+
+  LD_SW2(const0 + 4 * 8, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 10);
+  MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);
+  ST_SH2(h0, h3, int_buf + 8 * 8, 4 * 8);
+  ST_SH2(h1, h2, int_buf + 9 * 8, 4 * 8);
+
+  r1 = LD_SH(input + 8);
+  r6 = LD_SH(input + 6 * 8);
+  r9 = LD_SH(input + 9 * 8);
+  r14 = LD_SH(input + 14 * 8);
+
+  LD_SW2(const0 + 4 * 11, 4, k0, k1);
+  LD_SW2(const0 + 4 * 13, 4, k2, k3);
+  MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3);
+  ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8);
+
+  r2 = LD_SH(input + 2 * 8);
+  r5 = LD_SH(input + 5 * 8);
+  r10 = LD_SH(input + 10 * 8);
+  r13 = LD_SH(input + 13 * 8);
+
+  LD_SW2(const0 + 4 * 15, 4, k0, k1);
+  LD_SW2(const0 + 4 * 17, 4, k2, k3);
+  MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3);
+  ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8);
+  BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3);
+  ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8);
+}
+
+static void fadst16_rows_step2_msa(int16_t *int_buf, const int32_t *const0,
+                                   int16_t *out) {
+  int16_t *out_ptr = out + 8;
+  v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15;
+  v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11;
+  v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
+  v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
+  v4i32 k0, k1, k2, k3;
+
+  g13 = LD_SH(int_buf + 3 * 8);
+  g15 = LD_SH(int_buf + 7 * 8);
+  g5 = LD_SH(int_buf + 11 * 8);
+  g7 = LD_SH(int_buf + 15 * 8);
+
+  LD_SW2(const0 + 4 * 19, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 21);
+  MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
+
+  tp0 = LD_SH(int_buf + 4 * 8);
+  tp1 = LD_SH(int_buf + 5 * 8);
+  tp3 = LD_SH(int_buf + 10 * 8);
+  tp2 = LD_SH(int_buf + 14 * 8);
+
+  LD_SW2(const0 + 4 * 22, 4, k0, k1);
+  k2 = LD_SW(const0 + 4 * 24);
+  MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7);
+  out4 = -out4;
+  ST_SH(out4, (out + 3 * 16));
+  ST_SH(out5, (out_ptr + 4 * 16));
+
+  h1 = LD_SH(int_buf + 9 * 8);
+  h3 = LD_SH(int_buf + 12 * 8);
+  MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
+  out13 = -out13;
+  ST_SH(out12, (out + 2 * 16));
+  ST_SH(out13, (out_ptr + 5 * 16));
+
+  tp0 = LD_SH(int_buf);
+  tp1 = LD_SH(int_buf + 8);
+  tp2 = LD_SH(int_buf + 2 * 8);
+  tp3 = LD_SH(int_buf + 6 * 8);
+
+  BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10);
+  out1 = -out1;
+  ST_SH(out0, (out));
+  ST_SH(out1, (out_ptr + 7 * 16));
+
+  h0 = LD_SH(int_buf + 8 * 8);
+  h2 = LD_SH(int_buf + 13 * 8);
+  BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
+  out8 = -out8;
+  ST_SH(out8, (out + 16));
+  ST_SH(out9, (out_ptr + 6 * 16));
+
+  /* stage 4 */
+  LD_SW2(const0 + 4 * 25, 4, k0, k1);
+  LD_SW2(const0 + 4 * 27, 4, k2, k3);
+  MADD_SHORT(h10, h11, k1, k2, out2, out3);
+  ST_SH(out2, (out + 7 * 16));
+  ST_SH(out3, (out_ptr));
+
+  MADD_SHORT(out6, out7, k0, k3, out6, out7);
+  ST_SH(out6, (out + 4 * 16));
+  ST_SH(out7, (out_ptr + 3 * 16));
+
+  MADD_SHORT(out10, out11, k0, k3, out10, out11);
+  ST_SH(out10, (out + 6 * 16));
+  ST_SH(out11, (out_ptr + 16));
+
+  MADD_SHORT(out14, out15, k1, k2, out14, out15);
+  ST_SH(out14, (out + 5 * 16));
+  ST_SH(out15, (out_ptr + 2 * 16));
+}
+
+static void fadst16_transpose_msa(int16_t *input, int16_t *out) {
+  v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
+  v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
+
+  /* load input data */
+  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+          l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
+  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
+  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
+  out += 16 * 8;
+
+  /* load input data */
+  input += 128;
+  LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+          l7, l15);
+  TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+                     r7);
+  TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+                     r12, r13, r14, r15);
+  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
+  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
+}
+
+static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) {
+  int16_t *temp = intermediate;
+  int16_t *out = output;
+  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11;
+  v8i16 in12, in13, in14, in15;
+
+  LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);
+  temp = intermediate + 8;
+  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+                     in10, in11, in12, in13, in14, in15);
+  FDCT_POSTPROC_2V_NEG_H(in0, in1);
+  FDCT_POSTPROC_2V_NEG_H(in2, in3);
+  FDCT_POSTPROC_2V_NEG_H(in4, in5);
+  FDCT_POSTPROC_2V_NEG_H(in6, in7);
+  FDCT_POSTPROC_2V_NEG_H(in8, in9);
+  FDCT_POSTPROC_2V_NEG_H(in10, in11);
+  FDCT_POSTPROC_2V_NEG_H(in12, in13);
+  FDCT_POSTPROC_2V_NEG_H(in14, in15);
+  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
+               tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
+  temp = intermediate;
+  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);
+  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
+                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+  temp = intermediate;
+  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
+  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
+               in4, in5, in6, in7);
+  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
+                     tmp1, in1, tmp2, in2, tmp3, in3);
+  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
+  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
+                     tmp5, in5, tmp6, in6, tmp7, in7);
+  out = output + 8;
+  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
+}
+
+void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+                       int32_t tx_type) {
+  DECLARE_ALIGNED(32, int16_t, tmp[256]);
+  DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
+  DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
+  int32_t i;
+  int16_t *ptmpbuf = &tmp_buf[0];
+  int16_t *trans = &trans_buf[0];
+  const int32_t const_arr[29 * 4] = {
+    52707308,    52707308,    52707308,    52707308,    -1072430300,
+    -1072430300, -1072430300, -1072430300, 795618043,   795618043,
+    795618043,   795618043,   -721080468,  -721080468,  -721080468,
+    -721080468,  459094491,   459094491,   459094491,   459094491,
+    -970646691,  -970646691,  -970646691,  -970646691,  1010963856,
+    1010963856,  1010963856,  1010963856,  -361743294,  -361743294,
+    -361743294,  -361743294,  209469125,   209469125,   209469125,
+    209469125,   -1053094788, -1053094788, -1053094788, -1053094788,
+    1053160324,  1053160324,  1053160324,  1053160324,  639644520,
+    639644520,   639644520,   639644520,   -862444000,  -862444000,
+    -862444000,  -862444000,  1062144356,  1062144356,  1062144356,
+    1062144356,  -157532337,  -157532337,  -157532337,  -157532337,
+    260914709,   260914709,   260914709,   260914709,   -1041559667,
+    -1041559667, -1041559667, -1041559667, 920985831,   920985831,
+    920985831,   920985831,   -551995675,  -551995675,  -551995675,
+    -551995675,  596522295,   596522295,   596522295,   596522295,
+    892853362,   892853362,   892853362,   892853362,   -892787826,
+    -892787826,  -892787826,  -892787826,  410925857,   410925857,
+    410925857,   410925857,   -992012162,  -992012162,  -992012162,
+    -992012162,  992077698,   992077698,   992077698,   992077698,
+    759246145,   759246145,   759246145,   759246145,   -759180609,
+    -759180609,  -759180609,  -759180609,  -759222975,  -759222975,
+    -759222975,  -759222975,  759288511,   759288511,   759288511,
+    759288511
+  };
+
+  switch (tx_type) {
+    case DCT_DCT:
+      /* column transform */
+      for (i = 0; i < 2; ++i) {
+        fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride);
+      }
+
+      /* row transform */
+      for (i = 0; i < 2; ++i) {
+        fdct16x8_1d_row(tmp + (128 * i), output + (128 * i));
+      }
+      break;
+    case ADST_DCT:
+      /* column transform */
+      for (i = 0; i < 2; ++i) {
+        fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf);
+        fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3));
+      }
+
+      /* row transform */
+      for (i = 0; i < 2; ++i) {
+        postproc_fdct16x8_1d_row(tmp + (128 * i), output + (128 * i));
+      }
+      break;
+    case DCT_ADST:
+      /* column transform */
+      for (i = 0; i < 2; ++i) {
+        fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride);
+      }
+
+      fadst16_transpose_postproc_msa(tmp, trans);
+
+      /* row transform */
+      for (i = 0; i < 2; ++i) {
+        fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf);
+        fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7));
+      }
+
+      fadst16_transpose_msa(tmp, output);
+      break;
+    case ADST_ADST:
+      /* column transform */
+      for (i = 0; i < 2; ++i) {
+        fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf);
+        fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3));
+      }
+
+      fadst16_transpose_postproc_msa(tmp, trans);
+
+      /* row transform */
+      for (i = 0; i < 2; ++i) {
+        fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf);
+        fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7));
+      }
+
+      fadst16_transpose_msa(tmp, output);
+      break;
+    default: assert(0); break;
+  }
+}
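+
+/* Usage sketch (illustrative only, not built): forward-transform a single
+ * 16x16 residual block with the 2-D DCT. The example function and buffers
+ * are hypothetical. */
+#if 0
+static void fht16x16_example(void) {
+  DECLARE_ALIGNED(32, int16_t, residual[16 * 16]); /* filled by the caller */
+  DECLARE_ALIGNED(32, int16_t, coeffs[16 * 16]);
+  vp10_fht16x16_msa(residual, coeffs, 16 /* stride in samples */, DCT_DCT);
+}
+#endif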
diff --git a/av1/encoder/mips/msa/fdct4x4_msa.c b/av1/encoder/mips/msa/fdct4x4_msa.c
new file mode 100644
index 0000000..a3731c3
--- /dev/null
+++ b/av1/encoder/mips/msa/fdct4x4_msa.c
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "av1/common/enums.h"
+#include "av1/encoder/mips/msa/fdct_msa.h"
+
+void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
+                      int32_t src_stride) {
+  v8i16 in0, in1, in2, in3, in4;
+
+  LD_SH4(input, src_stride, in0, in1, in2, in3);
+
+  in0 += in1;
+  in3 -= in2;
+  in4 = (in0 - in3) >> 1;
+  SUB2(in4, in1, in4, in2, in1, in2);
+  in0 -= in2;
+  in3 += in1;
+
+  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);
+
+  in0 += in2;
+  in1 -= in3;
+  in4 = (in0 - in1) >> 1;
+  SUB2(in4, in2, in4, in3, in2, in3);
+  in0 -= in3;
+  in1 += in2;
+
+  SLLI_4V(in0, in1, in2, in3, 2);
+
+  TRANSPOSE4x4_SH_SH(in0, in3, in1, in2, in0, in3, in1, in2);
+
+  ST4x2_UB(in0, output, 4);
+  ST4x2_UB(in3, output + 4, 4);
+  ST4x2_UB(in1, output + 8, 4);
+  ST4x2_UB(in2, output + 12, 4);
+}
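+
+/* Each pass above is the 1-D Walsh-Hadamard kernel of the scalar reference
+ * (vp10_fwht4x4_c), applied to whole vector lanes:
+ *   a1 += b1; d1 -= c1; e1 = (a1 - d1) >> 1;
+ *   b1 = e1 - b1; c1 = e1 - c1; a1 -= c1; d1 += b1;
+ * with a final scale by 4 (SLLI_4V by 2) matching the unit-quant scaling of
+ * the C version. */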
+
+void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
+                     int32_t tx_type) {
+  v8i16 in0, in1, in2, in3;
+
+  LD_SH4(input, stride, in0, in1, in2, in3);
+
+  /* fdct4 pre-process */
+  {
+    v8i16 temp, mask;
+    v16i8 zero = { 0 };
+    v16i8 one = __msa_ldi_b(1);
+
+    mask = (v8i16)__msa_sldi_b(zero, one, 15);
+    SLLI_4V(in0, in1, in2, in3, 4);
+    temp = __msa_ceqi_h(in0, 0);
+    temp = (v8i16)__msa_xori_b((v16u8)temp, 255);
+    temp = mask & temp;
+    in0 += temp;
+  }
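+  /* The block above mirrors the scalar pre-processing of the C transform:
+   * all inputs are scaled up by 16, and input element 0 is incremented by 1
+   * when it is nonzero (the mask vector is 1 in lane 0, 0 elsewhere). */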
+
+  switch (tx_type) {
+    case DCT_DCT:
+      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case ADST_DCT:
+      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case DCT_ADST:
+      VPX_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case ADST_ADST:
+      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    default: assert(0); break;
+  }
+
+  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
+  SRA_4V(in0, in1, in2, in3, 2);
+  PCKEV_D2_SH(in1, in0, in3, in2, in0, in2);
+  ST_SH2(in0, in2, output, 8);
+}
diff --git a/av1/encoder/mips/msa/fdct8x8_msa.c b/av1/encoder/mips/msa/fdct8x8_msa.c
new file mode 100644
index 0000000..3b6532a
--- /dev/null
+++ b/av1/encoder/mips/msa/fdct8x8_msa.c
@@ -0,0 +1,64 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "av1/common/enums.h"
+#include "av1/encoder/mips/msa/fdct_msa.h"
+
+void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
+                     int32_t tx_type) {
+  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
+
+  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
+  SLLI_4V(in0, in1, in2, in3, 2);
+  SLLI_4V(in4, in5, in6, in7, 2);
+
+  switch (tx_type) {
+    case DCT_DCT:
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      break;
+    case ADST_DCT:
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      break;
+    case DCT_ADST:
+      VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      break;
+    case ADST_ADST:
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+                         in3, in4, in5, in6, in7);
+      VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+                in5, in6, in7);
+      break;
+    default: assert(0); break;
+  }
+
+  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+                     in4, in5, in6, in7);
+  SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
+  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
+}
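+
+/* The hybrid transforms above all follow the same separable 2-D pattern:
+ * a 1-D transform of the columns, a transpose, a 1-D transform of the
+ * transposed data (i.e. the rows), a final transpose, then rounding of the
+ * output. */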
diff --git a/av1/encoder/mips/msa/fdct_msa.h b/av1/encoder/mips/msa/fdct_msa.h
new file mode 100644
index 0000000..07471d0
--- /dev/null
+++ b/av1/encoder/mips/msa/fdct_msa.h
@@ -0,0 +1,116 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#define VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+
+#include "aom_dsp/mips/fwd_txfm_msa.h"
+#include "aom_dsp/mips/txfm_macros_msa.h"
+#include "aom_ports/mem.h"
+
+#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2,  \
+                  out3, out4, out5, out6, out7)                              \
+  {                                                                          \
+    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                       \
+    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                        \
+    v8i16 coeff0_m = { cospi_2_64,  cospi_6_64,  cospi_10_64, cospi_14_64,   \
+                       cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
+    v8i16 coeff1_m = { cospi_8_64,  -cospi_8_64,  cospi_16_64, -cospi_16_64, \
+                       cospi_24_64, -cospi_24_64, 0,           0 };          \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in7, in0, in4, in3);             \
+                                                                             \
+    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                          \
+    cnst2_m = -cnst0_m;                                                      \
+    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);       \
+    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                          \
+    cnst4_m = -cnst2_m;                                                      \
+    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);       \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+                                                                             \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m,  \
+                          cnst2_m, cnst3_m, in5, in2, in6, in1);             \
+    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                   \
+    out7 = -s0_m;                                                            \
+    out0 = s1_m;                                                             \
+                                                                             \
+    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m);  \
+                                                                             \
+    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);       \
+    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+    cnst1_m = cnst0_m;                                                       \
+                                                                             \
+    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                   \
+    DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m,  \
+                          cnst3_m, cnst1_m, out1, out6, s0_m, s1_m);         \
+                                                                             \
+    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                          \
+    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                               \
+                                                                             \
+    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                   \
+    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                                 \
+    out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);                   \
+    out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);                   \
+    out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);                   \
+    out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);                   \
+                                                                             \
+    out1 = -out1;                                                            \
+    out3 = -out3;                                                            \
+    out5 = -out5;                                                            \
+  }
+
+#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3)              \
+  {                                                                         \
+    v4i32 s0_m, s1_m, s2_m, s3_m, constant_m;                               \
+    v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m;                               \
+                                                                            \
+    UNPCK_R_SH_SW(in0, in0_r_m);                                            \
+    UNPCK_R_SH_SW(in1, in1_r_m);                                            \
+    UNPCK_R_SH_SW(in2, in2_r_m);                                            \
+    UNPCK_R_SH_SW(in3, in3_r_m);                                            \
+                                                                            \
+    constant_m = __msa_fill_w(sinpi_4_9);                                   \
+    MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m);             \
+                                                                            \
+    constant_m = __msa_fill_w(sinpi_1_9);                                   \
+    s0_m += in0_r_m * constant_m;                                           \
+    s1_m -= in1_r_m * constant_m;                                           \
+                                                                            \
+    constant_m = __msa_fill_w(sinpi_2_9);                                   \
+    s0_m += in1_r_m * constant_m;                                           \
+    s1_m += in3_r_m * constant_m;                                           \
+                                                                            \
+    s2_m = in0_r_m + in1_r_m - in3_r_m;                                     \
+                                                                            \
+    constant_m = __msa_fill_w(sinpi_3_9);                                   \
+    MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m);             \
+                                                                            \
+    in0_r_m = s0_m + s3_m;                                                  \
+    s2_m = s1_m - s3_m;                                                     \
+    s3_m = s1_m - s0_m + s3_m;                                              \
+                                                                            \
+    SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS);              \
+    PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
+                out0, out1, out2, out3);                                    \
+  }
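+
+/* A scalar sketch of what VPX_FADST4 computes, mirroring the reference C
+ * fadst4 (fdct_round_shift denotes rounding by DCT_CONST_BITS):
+ *   out0 = fdct_round_shift(sinpi_1_9 * x0 + sinpi_2_9 * x1 +
+ *                           sinpi_3_9 * x2 + sinpi_4_9 * x3)
+ *   out1 = fdct_round_shift(sinpi_3_9 * (x0 + x1 - x3))
+ *   out2 = fdct_round_shift(sinpi_4_9 * x0 - sinpi_1_9 * x1 -
+ *                           sinpi_3_9 * x2 + sinpi_2_9 * x3)
+ *   out3 = fdct_round_shift(sinpi_2_9 * x0 - sinpi_4_9 * x1 +
+ *                           sinpi_3_9 * x2 - sinpi_1_9 * x3)
+ * where out3 folds in the identity sinpi_1_9 + sinpi_2_9 == sinpi_4_9. */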
+#endif  // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
diff --git a/av1/encoder/mips/msa/temporal_filter_msa.c b/av1/encoder/mips/msa/temporal_filter_msa.c
new file mode 100644
index 0000000..4d60d37
--- /dev/null
+++ b/av1/encoder/mips/msa/temporal_filter_msa.c
@@ -0,0 +1,283 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vp10_rtcd.h"
+#include "aom_dsp/mips/macros_msa.h"
+
+static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
+                                            uint8_t *frm2_ptr, int32_t filt_sth,
+                                            int32_t filt_wgt, uint32_t *acc,
+                                            uint16_t *cnt) {
+  uint32_t row;
+  uint64_t f0, f1, f2, f3;
+  v16i8 frm2, frm1 = { 0 };
+  v16i8 frm4, frm3 = { 0 };
+  v16u8 frm_r, frm_l;
+  v8i16 frm2_r, frm2_l;
+  v8i16 diff0, diff1, mod0_h, mod1_h;
+  v4i32 cnst3, cnst16, filt_wt, strength;
+  v4i32 mod0_w, mod1_w, mod2_w, mod3_w;
+  v4i32 diff0_r, diff0_l, diff1_r, diff1_l;
+  v4i32 frm2_rr, frm2_rl, frm2_lr, frm2_ll;
+  v4i32 acc0, acc1, acc2, acc3;
+  v8i16 cnt0, cnt1;
+
+  filt_wt = __msa_fill_w(filt_wgt);
+  strength = __msa_fill_w(filt_sth);
+  cnst3 = __msa_ldi_w(3);
+  cnst16 = __msa_ldi_w(16);
+
+  for (row = 2; row--;) {
+    LD4(frm1_ptr, stride, f0, f1, f2, f3);
+    frm1_ptr += (4 * stride);
+
+    LD_SB2(frm2_ptr, 16, frm2, frm4);
+    frm2_ptr += 32;
+
+    LD_SW2(acc, 4, acc0, acc1);
+    LD_SW2(acc + 8, 4, acc2, acc3);
+    LD_SH2(cnt, 8, cnt0, cnt1);
+
+    INSERT_D2_SB(f0, f1, frm1);
+    INSERT_D2_SB(f2, f3, frm3);
+    ILVRL_B2_UB(frm1, frm2, frm_r, frm_l);
+    HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
+    UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+    UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+    SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+
+    diff0_r = (mod0_w < cnst16);
+    diff0_l = (mod1_w < cnst16);
+    diff1_r = (mod2_w < cnst16);
+    diff1_l = (mod3_w < cnst16);
+
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+
+    mod0_w = diff0_r & mod0_w;
+    mod1_w = diff0_l & mod1_w;
+    mod2_w = diff1_r & mod2_w;
+    mod3_w = diff1_l & mod3_w;
+
+    MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+    ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+    ST_SH2(mod0_h, mod1_h, cnt, 8);
+    cnt += 16;
+
+    UNPCK_UB_SH(frm2, frm2_r, frm2_l);
+    UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl);
+    UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
+    MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
+
+    ST_SW2(mod0_w, mod1_w, acc, 4);
+    acc += 8;
+    ST_SW2(mod2_w, mod3_w, acc, 4);
+    acc += 8;
+
+    LD_SW2(acc, 4, acc0, acc1);
+    LD_SW2(acc + 8, 4, acc2, acc3);
+    LD_SH2(cnt, 8, cnt0, cnt1);
+
+    ILVRL_B2_UB(frm3, frm4, frm_r, frm_l);
+    HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
+    UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+    UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+    SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+
+    diff0_r = (mod0_w < cnst16);
+    diff0_l = (mod1_w < cnst16);
+    diff1_r = (mod2_w < cnst16);
+    diff1_l = (mod3_w < cnst16);
+
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+
+    mod0_w = diff0_r & mod0_w;
+    mod1_w = diff0_l & mod1_w;
+    mod2_w = diff1_r & mod2_w;
+    mod3_w = diff1_l & mod3_w;
+
+    MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+    ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+    ST_SH2(mod0_h, mod1_h, cnt, 8);
+    cnt += 16;
+    UNPCK_UB_SH(frm4, frm2_r, frm2_l);
+    UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl);
+    UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
+    MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
+
+    ST_SW2(mod0_w, mod1_w, acc, 4);
+    acc += 8;
+    ST_SW2(mod2_w, mod3_w, acc, 4);
+    acc += 8;
+  }
+}
+
+static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
+                                             uint8_t *frm2_ptr,
+                                             int32_t filt_sth, int32_t filt_wgt,
+                                             uint32_t *acc, uint16_t *cnt) {
+  uint32_t row;
+  v16i8 frm1, frm2, frm3, frm4;
+  v16u8 frm_r, frm_l;
+  v16i8 zero = { 0 };
+  v8u16 frm2_r, frm2_l;
+  v8i16 diff0, diff1, mod0_h, mod1_h;
+  v4i32 cnst3, cnst16, filt_wt, strength;
+  v4i32 mod0_w, mod1_w, mod2_w, mod3_w;
+  v4i32 diff0_r, diff0_l, diff1_r, diff1_l;
+  v4i32 frm2_rr, frm2_rl, frm2_lr, frm2_ll;
+  v4i32 acc0, acc1, acc2, acc3;
+  v8i16 cnt0, cnt1;
+
+  filt_wt = __msa_fill_w(filt_wgt);
+  strength = __msa_fill_w(filt_sth);
+  cnst3 = __msa_ldi_w(3);
+  cnst16 = __msa_ldi_w(16);
+
+  for (row = 8; row--;) {
+    LD_SB2(frm1_ptr, stride, frm1, frm3);
+    frm1_ptr += stride;
+
+    LD_SB2(frm2_ptr, 16, frm2, frm4);
+    frm2_ptr += 16;
+
+    LD_SW2(acc, 4, acc0, acc1);
+    LD_SW2(acc + 8, 4, acc2, acc3);
+    LD_SH2(cnt, 8, cnt0, cnt1);
+
+    ILVRL_B2_UB(frm1, frm2, frm_r, frm_l);
+    HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
+    UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+    UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+    SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+
+    diff0_r = (mod0_w < cnst16);
+    diff0_l = (mod1_w < cnst16);
+    diff1_r = (mod2_w < cnst16);
+    diff1_l = (mod3_w < cnst16);
+
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+
+    mod0_w = diff0_r & mod0_w;
+    mod1_w = diff0_l & mod1_w;
+    mod2_w = diff1_r & mod2_w;
+    mod3_w = diff1_l & mod3_w;
+
+    MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+    ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+    ST_SH2(mod0_h, mod1_h, cnt, 8);
+    cnt += 16;
+
+    ILVRL_B2_UH(zero, frm2, frm2_r, frm2_l);
+    UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl);
+    UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
+    MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
+
+    ST_SW2(mod0_w, mod1_w, acc, 4);
+    acc += 8;
+    ST_SW2(mod2_w, mod3_w, acc, 4);
+    acc += 8;
+
+    LD_SW2(acc, 4, acc0, acc1);
+    LD_SW2(acc + 8, 4, acc2, acc3);
+    LD_SH2(cnt, 8, cnt0, cnt1);
+
+    ILVRL_B2_UB(frm3, frm4, frm_r, frm_l);
+    HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
+    UNPCK_SH_SW(diff0, diff0_r, diff0_l);
+    UNPCK_SH_SW(diff1, diff1_r, diff1_l);
+    MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+    SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
+
+    diff0_r = (mod0_w < cnst16);
+    diff0_l = (mod1_w < cnst16);
+    diff1_r = (mod2_w < cnst16);
+    diff1_l = (mod3_w < cnst16);
+
+    SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+         mod1_w, mod2_w, mod3_w);
+
+    mod0_w = diff0_r & mod0_w;
+    mod1_w = diff0_l & mod1_w;
+    mod2_w = diff1_r & mod2_w;
+    mod3_w = diff1_l & mod3_w;
+
+    MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h);
+    ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h);
+    ST_SH2(mod0_h, mod1_h, cnt, 8);
+    cnt += 16;
+
+    ILVRL_B2_UH(zero, frm4, frm2_r, frm2_l);
+    UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl);
+    UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
+    MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
+         mod0_w, mod1_w, mod2_w, mod3_w);
+    ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+         mod2_w, mod3_w);
+    ST_SW2(mod0_w, mod1_w, acc, 4);
+    acc += 8;
+    ST_SW2(mod2_w, mod3_w, acc, 4);
+    acc += 8;
+
+    frm1_ptr += stride;
+    frm2_ptr += 16;
+  }
+}
+
+void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
+                                    uint8_t *frame2_ptr, uint32_t blk_w,
+                                    uint32_t blk_h, int32_t strength,
+                                    int32_t filt_wgt, uint32_t *accu,
+                                    uint16_t *cnt) {
+  if (8 == blk_w && 8 == blk_h) {
+    temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
+                                    filt_wgt, accu, cnt);
+  } else if (16 == blk_w && 16 == blk_h) {
+    temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
+                                     filt_wgt, accu, cnt);
+  } else {
+    vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
+                                 strength, filt_wgt, accu, cnt);
+  }
+}
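+
+/* A scalar sketch of the per-pixel update both MSA kernels above implement,
+ * mirroring the C reference vp10_temporal_filter_apply_c:
+ *
+ *   diff = frm1[i] - frm2[i];
+ *   mod  = round(diff * diff * 3 / (1 << strength));   // SRAR_W4_SW
+ *   mod  = (mod < 16) ? (16 - mod) * filt_wgt : 0;     // mask + SUB4 + MUL4
+ *   cnt[i] += mod;
+ *   acc[i] += mod * frm2[i];
+ */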
diff --git a/av1/encoder/palette.c b/av1/encoder/palette.c
new file mode 100644
index 0000000..74f91b7
--- /dev/null
+++ b/av1/encoder/palette.c
@@ -0,0 +1,191 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include "av1/encoder/palette.h"
+
+static float calc_dist(const float *p1, const float *p2, int dim) {
+  float dist = 0;
+  int i;
+  for (i = 0; i < dim; ++i) {
+    const float diff = p1[i] - p2[i];
+    dist += diff * diff;
+  }
+  return dist;
+}
+
+void vp10_calc_indices(const float *data, const float *centroids,
+                       uint8_t *indices, int n, int k, int dim) {
+  int i, j;
+  for (i = 0; i < n; ++i) {
+    float min_dist = calc_dist(data + i * dim, centroids, dim);
+    indices[i] = 0;
+    for (j = 1; j < k; ++j) {
+      const float this_dist =
+          calc_dist(data + i * dim, centroids + j * dim, dim);
+      if (this_dist < min_dist) {
+        min_dist = this_dist;
+        indices[i] = j;
+      }
+    }
+  }
+}
+
+// Generate a random number in the range [0, 32768).
+static unsigned int lcg_rand16(unsigned int *state) {
+  *state = *state * 1103515245 + 12345;
+  return *state / 65536 % 32768;
+}
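+// This is the classic power-of-two-modulus LCG; dividing by 65536 keeps only
+// the high-order bits, which have a much longer period than the low-order
+// bits, and % 32768 then retains 15 of them.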
+
+static void calc_centroids(const float *data, float *centroids,
+                           const uint8_t *indices, int n, int k, int dim) {
+  int i, j, index;
+  int count[PALETTE_MAX_SIZE];
+  unsigned int rand_state = (unsigned int)data[0];
+
+  assert(n <= 32768);
+
+  memset(count, 0, sizeof(count[0]) * k);
+  memset(centroids, 0, sizeof(centroids[0]) * k * dim);
+
+  for (i = 0; i < n; ++i) {
+    index = indices[i];
+    assert(index < k);
+    ++count[index];
+    for (j = 0; j < dim; ++j) {
+      centroids[index * dim + j] += data[i * dim + j];
+    }
+  }
+
+  for (i = 0; i < k; ++i) {
+    if (count[i] == 0) {
+      memcpy(centroids + i * dim, data + (lcg_rand16(&rand_state) % n) * dim,
+             sizeof(centroids[0]) * dim);
+    } else {
+      const float norm = 1.0f / count[i];
+      for (j = 0; j < dim; ++j) centroids[i * dim + j] *= norm;
+    }
+  }
+
+  // Round to nearest integers.
+  for (i = 0; i < k * dim; ++i) {
+    centroids[i] = roundf(centroids[i]);
+  }
+}
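+// Note: an empty cluster is re-seeded with a randomly chosen data point (a
+// standard k-means fallback); seeding the LCG from data[0] keeps the choice
+// deterministic for a given input.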
+
+static float calc_total_dist(const float *data, const float *centroids,
+                             const uint8_t *indices, int n, int k, int dim) {
+  float dist = 0;
+  int i;
+  (void)k;
+
+  for (i = 0; i < n; ++i)
+    dist += calc_dist(data + i * dim, centroids + indices[i] * dim, dim);
+
+  return dist;
+}
+
+void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+                  int k, int dim, int max_itr) {
+  int i;
+  float this_dist;
+  float pre_centroids[2 * PALETTE_MAX_SIZE];
+  uint8_t pre_indices[MAX_SB_SQUARE];
+
+  vp10_calc_indices(data, centroids, indices, n, k, dim);
+  this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
+
+  for (i = 0; i < max_itr; ++i) {
+    const float pre_dist = this_dist;
+    memcpy(pre_centroids, centroids, sizeof(pre_centroids[0]) * k * dim);
+    memcpy(pre_indices, indices, sizeof(pre_indices[0]) * n);
+
+    calc_centroids(data, centroids, indices, n, k, dim);
+    vp10_calc_indices(data, centroids, indices, n, k, dim);
+    this_dist = calc_total_dist(data, centroids, indices, n, k, dim);
+
+    if (this_dist > pre_dist) {
+      memcpy(centroids, pre_centroids, sizeof(pre_centroids[0]) * k * dim);
+      memcpy(indices, pre_indices, sizeof(pre_indices[0]) * n);
+      break;
+    }
+    if (!memcmp(centroids, pre_centroids, sizeof(pre_centroids[0]) * k * dim))
+      break;
+  }
+}
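+
+/* Usage sketch (illustrative only, not built): derive a 4-color palette
+ * from 16 grayscale samples (dim = 1). The example function and its buffers
+ * are hypothetical. */
+#if 0
+static void palette_kmeans_example(void) {
+  float data[16] = { 10, 12, 11, 13, 50, 52, 51, 49,
+                     90, 92, 91, 89, 200, 202, 201, 199 };
+  float centroids[4] = { 0, 64, 128, 192 }; /* initial guesses */
+  uint8_t indices[16];
+  int num_colors;
+  vp10_k_means(data, centroids, indices, 16, 4, 1, 50);
+  /* The centroids come back rounded to integers; drop any duplicates. */
+  num_colors = vp10_remove_duplicates(centroids, 4);
+  (void)num_colors;
+}
+#endif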
+
+static int float_comparer(const void *a, const void *b) {
+  const float fa = *(const float *)a;
+  const float fb = *(const float *)b;
+  return (fa > fb) - (fa < fb);
+}
+
+int vp10_remove_duplicates(float *centroids, int num_centroids) {
+  int num_unique;  // number of unique centroids
+  int i;
+  qsort(centroids, num_centroids, sizeof(*centroids), float_comparer);
+  // Remove duplicates.
+  num_unique = 1;
+  for (i = 1; i < num_centroids; ++i) {
+    if (centroids[i] != centroids[i - 1]) {  // found a new unique centroid
+      centroids[num_unique++] = centroids[i];
+    }
+  }
+  return num_unique;
+}
+
+int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols) {
+  int n = 0, r, c, i, val_count[256];
+  uint8_t val;
+  memset(val_count, 0, sizeof(val_count));
+
+  for (r = 0; r < rows; ++r) {
+    for (c = 0; c < cols; ++c) {
+      val = src[r * stride + c];
+      ++val_count[val];
+    }
+  }
+
+  for (i = 0; i < 256; ++i) {
+    if (val_count[i]) {
+      ++n;
+    }
+  }
+
+  return n;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
+                             int cols, int bit_depth) {
+  int n = 0, r, c, i;
+  uint16_t val;
+  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  int val_count[1 << 12];
+
+  assert(bit_depth <= 12);
+  memset(val_count, 0, (1 << 12) * sizeof(val_count[0]));
+  for (r = 0; r < rows; ++r) {
+    for (c = 0; c < cols; ++c) {
+      val = src[r * stride + c];
+      ++val_count[val];
+    }
+  }
+
+  for (i = 0; i < (1 << bit_depth); ++i) {
+    if (val_count[i]) {
+      ++n;
+    }
+  }
+
+  return n;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
diff --git a/av1/encoder/palette.h b/av1/encoder/palette.h
new file mode 100644
index 0000000..fbbb39c
--- /dev/null
+++ b/av1/encoder/palette.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_PALETTE_H_
+#define VP10_ENCODER_PALETTE_H_
+
+#include "av1/common/blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_calc_indices(const float *data, const float *centroids,
+                       uint8_t *indices, int n, int k, int dim);
+
+// Given 'n' 'dim'-dimensional points in 'data' and an initial guess of 'k'
+// 'centroids', runs up to 'max_itr' iterations of the k-means algorithm to
+// produce updated 'centroids' and the centroid 'indices' for each point.
+// Note: the output centroids are rounded to the nearest integers.
+void vp10_k_means(const float *data, float *centroids, uint8_t *indices, int n,
+                  int k, int dim, int max_itr);
+
+// Given a list of centroids, returns the number of unique centroids 'k', and
+// places these unique centroids in the first 'k' entries of the 'centroids'
+// array. Ideally, the centroids should be rounded to integers before calling
+// this method.
+int vp10_remove_duplicates(float *centroids, int num_centroids);
+
+int vp10_count_colors(const uint8_t *src, int stride, int rows, int cols);
+#if CONFIG_VP9_HIGHBITDEPTH
+int vp10_count_colors_highbd(const uint8_t *src8, int stride, int rows,
+                             int cols, int bit_depth);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif /* VP10_ENCODER_PALETTE_H_ */
diff --git a/av1/encoder/pickdering.c b/av1/encoder/pickdering.c
new file mode 100644
index 0000000..91e9b54
--- /dev/null
+++ b/av1/encoder/pickdering.c
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "./vpx_scale_rtcd.h"
+#include "av1/common/dering.h"
+#include "av1/common/onyxc_int.h"
+#include "av1/common/reconinter.h"
+#include "av1/encoder/encoder.h"
+#include "aom/vpx_integer.h"
+
+static double compute_dist(int16_t *x, int xstride, int16_t *y, int ystride,
+                           int nhb, int nvb, int coeff_shift) {
+  int i, j;
+  double sum;
+  sum = 0;
+  for (i = 0; i < nvb << 3; i++) {
+    for (j = 0; j < nhb << 3; j++) {
+      double tmp;
+      tmp = x[i * xstride + j] - y[i * ystride + j];
+      sum += tmp * tmp;
+    }
+  }
+  return sum / (double)(1 << 2 * coeff_shift);
+}
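+// coeff_shift is bit_depth - 8 (see below), so dividing by 2^(2 * coeff_shift)
+// maps the high-bitdepth sum of squared errors back to the 8-bit scale.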
+
+int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                       VP10_COMMON *cm, MACROBLOCKD *xd) {
+  int r, c;
+  int sbr, sbc;
+  int nhsb, nvsb;
+  od_dering_in *src;
+  int16_t *ref_coeff;
+  unsigned char *bskip;
+  int dir[OD_DERING_NBLOCKS][OD_DERING_NBLOCKS] = { { 0 } };
+  int stride;
+  int bsize[3];
+  int dec[3];
+  int pli;
+  int(*mse)[MAX_DERING_LEVEL];
+  int best_count[MAX_DERING_LEVEL] = { 0 };
+  double tot_mse[MAX_DERING_LEVEL] = { 0 };
+  int level;
+  int best_level;
+  int global_level;
+  double best_tot_mse = 1e15;
+  int coeff_shift = VPXMAX(cm->bit_depth - 8, 0);
+  src = vpx_malloc(sizeof(*src) * cm->mi_rows * cm->mi_cols * 64);
+  ref_coeff = vpx_malloc(sizeof(*ref_coeff) * cm->mi_rows * cm->mi_cols * 64);
+  bskip = vpx_malloc(sizeof(*bskip) * cm->mi_rows * cm->mi_cols);
+  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  for (pli = 0; pli < 3; pli++) {
+    dec[pli] = xd->plane[pli].subsampling_x;
+    bsize[pli] = 8 >> dec[pli];
+  }
+  stride = bsize[0] * cm->mi_cols;
+  for (r = 0; r < bsize[0] * cm->mi_rows; ++r) {
+    for (c = 0; c < bsize[0] * cm->mi_cols; ++c) {
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cm->use_highbitdepth) {
+        src[r * stride + c] = CONVERT_TO_SHORTPTR(
+            xd->plane[0].dst.buf)[r * xd->plane[0].dst.stride + c];
+        ref_coeff[r * stride + c] =
+            CONVERT_TO_SHORTPTR(ref->y_buffer)[r * ref->y_stride + c];
+      } else {
+#endif
+        src[r * stride + c] =
+            xd->plane[0].dst.buf[r * xd->plane[0].dst.stride + c];
+        ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
+#if CONFIG_VP9_HIGHBITDEPTH
+      }
+#endif
+    }
+  }
+  for (r = 0; r < cm->mi_rows; ++r) {
+    for (c = 0; c < cm->mi_cols; ++c) {
+      const MB_MODE_INFO *mbmi =
+          &cm->mi_grid_visible[r * cm->mi_stride + c]->mbmi;
+      bskip[r * cm->mi_cols + c] = mbmi->skip;
+    }
+  }
+  nvsb = (cm->mi_rows + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
+  nhsb = (cm->mi_cols + MAX_MIB_SIZE - 1) / MAX_MIB_SIZE;
+  mse = vpx_malloc(nvsb * nhsb * sizeof(*mse));
+  for (sbr = 0; sbr < nvsb; sbr++) {
+    for (sbc = 0; sbc < nhsb; sbc++) {
+      int best_mse = 1000000000;
+      int nvb, nhb;
+      int16_t dst[MAX_MIB_SIZE * MAX_MIB_SIZE * 8 * 8];
+      best_level = 0;
+      nhb = VPXMIN(MAX_MIB_SIZE, cm->mi_cols - MAX_MIB_SIZE * sbc);
+      nvb = VPXMIN(MAX_MIB_SIZE, cm->mi_rows - MAX_MIB_SIZE * sbr);
+      for (level = 0; level < 64; level++) {
+        int threshold;
+        threshold = level << coeff_shift;
+        od_dering(
+            &OD_DERING_VTBL_C, dst, MAX_MIB_SIZE * bsize[0],
+            &src[sbr * stride * bsize[0] * MAX_MIB_SIZE +
+                 sbc * bsize[0] * MAX_MIB_SIZE],
+            cm->mi_cols * bsize[0], nhb, nvb, sbc, sbr, nhsb, nvsb, 0, dir, 0,
+            &bskip[MAX_MIB_SIZE * sbr * cm->mi_cols + MAX_MIB_SIZE * sbc],
+            cm->mi_cols, threshold, OD_DERING_NO_CHECK_OVERLAP, coeff_shift);
+        mse[nhsb * sbr + sbc][level] = (int)compute_dist(
+            dst, MAX_MIB_SIZE * bsize[0],
+            &ref_coeff[sbr * stride * bsize[0] * MAX_MIB_SIZE +
+                       sbc * bsize[0] * MAX_MIB_SIZE],
+            stride, nhb, nvb, coeff_shift);
+        tot_mse[level] += mse[nhsb * sbr + sbc][level];
+        if (mse[nhsb * sbr + sbc][level] < best_mse) {
+          best_mse = mse[nhsb * sbr + sbc][level];
+          best_level = level;
+        }
+      }
+      best_count[best_level]++;
+    }
+  }
+#if DERING_REFINEMENT
+  best_level = 0;
+  /* Search for the best global level one value at a time. */
+  for (global_level = 2; global_level < MAX_DERING_LEVEL; global_level++) {
+    double tot_mse = 0;
+    for (sbr = 0; sbr < nvsb; sbr++) {
+      for (sbc = 0; sbc < nhsb; sbc++) {
+        int gi;
+        int best_mse = mse[nhsb * sbr + sbc][0];
+        for (gi = 1; gi < DERING_REFINEMENT_LEVELS; gi++) {
+          level = compute_level_from_index(global_level, gi);
+          if (mse[nhsb * sbr + sbc][level] < best_mse) {
+            best_mse = mse[nhsb * sbr + sbc][level];
+          }
+        }
+        tot_mse += best_mse;
+      }
+    }
+    if (tot_mse < best_tot_mse) {
+      best_level = global_level;
+      best_tot_mse = tot_mse;
+    }
+  }
+  for (sbr = 0; sbr < nvsb; sbr++) {
+    for (sbc = 0; sbc < nhsb; sbc++) {
+      int gi;
+      int best_gi;
+      int best_mse = mse[nhsb * sbr + sbc][0];
+      best_gi = 0;
+      for (gi = 1; gi < DERING_REFINEMENT_LEVELS; gi++) {
+        level = compute_level_from_index(best_level, gi);
+        if (mse[nhsb * sbr + sbc][level] < best_mse) {
+          best_gi = gi;
+          best_mse = mse[nhsb * sbr + sbc][level];
+        }
+      }
+      cm->mi_grid_visible[MAX_MIB_SIZE * sbr * cm->mi_stride +
+                          MAX_MIB_SIZE * sbc]
+          ->mbmi.dering_gain = best_gi;
+    }
+  }
+#else
+  best_level = 0;
+  for (level = 0; level < MAX_DERING_LEVEL; level++) {
+    if (tot_mse[level] < tot_mse[best_level]) best_level = level;
+  }
+#endif
+  vpx_free(src);
+  vpx_free(ref_coeff);
+  vpx_free(bskip);
+  vpx_free(mse);
+  return best_level;
+}
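+
+/* Search sketch: stage 1 measures, for every superblock, the MSE of all 64
+ * dering thresholds against the source. Stage 2 picks the global level whose
+ * per-superblock refinement choices (compute_level_from_index) minimize the
+ * total MSE, then stores the best refinement index in each superblock's
+ * mbmi.dering_gain. */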
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
new file mode 100644
index 0000000..c4e9b7d
--- /dev/null
+++ b/av1/encoder/picklpf.c
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+
+#include "./vpx_scale_rtcd.h"
+
+#include "aom_dsp/psnr.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/loopfilter.h"
+#include "av1/common/onyxc_int.h"
+#include "av1/common/quant_common.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/picklpf.h"
+#include "av1/encoder/quantize.h"
+
+int vp10_get_max_filter_level(const VP10_COMP *cpi) {
+  if (cpi->oxcf.pass == 2) {
+    return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4
+                                                 : MAX_LOOP_FILTER;
+  } else {
+    return MAX_LOOP_FILTER;
+  }
+}
+
+static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
+                                VP10_COMP *const cpi, int filt_level,
+                                int partial_frame) {
+  VP10_COMMON *const cm = &cpi->common;
+  int64_t filt_err;
+
+#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION
+  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+                         1, partial_frame);
+#else
+  if (cpi->num_workers > 1)
+    vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+                              filt_level, 1, partial_frame, cpi->workers,
+                              cpi->num_workers, &cpi->lf_row_sync);
+  else
+    vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+                           1, partial_frame);
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (cm->use_highbitdepth) {
+    filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+  } else {
+    filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+  }
+#else
+  filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // Re-instate the unfiltered frame
+  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+
+  return filt_err;
+}
+
+int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                             int partial_frame, double *best_cost_ret) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const struct loopfilter *const lf = &cm->lf;
+  const int min_filter_level = 0;
+  const int max_filter_level = vp10_get_max_filter_level(cpi);
+  int filt_direction = 0;
+  int64_t best_err;
+  int filt_best;
+  MACROBLOCK *x = &cpi->td.mb;
+
+  // Start the search at the previous frame filter level unless it is now out of
+  // range.
+  int filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level);
+  int filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
+  // Sum squared error at each filter level
+  int64_t ss_err[MAX_LOOP_FILTER + 1];
+
+  // Set each entry to -1
+  memset(ss_err, 0xFF, sizeof(ss_err));
+
+  //  Make a copy of the unfiltered / processed recon buffer
+  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+
+  best_err = try_filter_frame(sd, cpi, filt_mid, partial_frame);
+  filt_best = filt_mid;
+  ss_err[filt_mid] = best_err;
+
+  while (filter_step > 0) {
+    const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
+    const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+
+    // Bias against raising loop filter in favor of lowering it.
+    int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
+
+    if ((cpi->oxcf.pass == 2) && (cpi->twopass.section_intra_rating < 20))
+      bias = (bias * cpi->twopass.section_intra_rating) / 20;
+
+    // Bias less when larger transform sizes are in use.
+    if (cm->tx_mode != ONLY_4X4) bias >>= 1;
+
+    if (filt_direction <= 0 && filt_low != filt_mid) {
+      // Get Low filter error score
+      if (ss_err[filt_low] < 0) {
+        ss_err[filt_low] = try_filter_frame(sd, cpi, filt_low, partial_frame);
+      }
+      // If the value is close to the best so far, bias towards a lower loop
+      // filter value.
+      if (ss_err[filt_low] < (best_err + bias)) {
+        // Was it actually better than the previous best?
+        if (ss_err[filt_low] < best_err) {
+          best_err = ss_err[filt_low];
+        }
+        filt_best = filt_low;
+      }
+    }
+
+    // Now look at filt_high
+    if (filt_direction >= 0 && filt_high != filt_mid) {
+      if (ss_err[filt_high] < 0) {
+        ss_err[filt_high] = try_filter_frame(sd, cpi, filt_high, partial_frame);
+      }
+      // If the value is significantly better than the previous best, add a
+      // bias against raising the filter value.
+      if (ss_err[filt_high] < (best_err - bias)) {
+        best_err = ss_err[filt_high];
+        filt_best = filt_high;
+      }
+    }
+
+    // Halve the step distance if the best filter value was the same as last time
+    if (filt_best == filt_mid) {
+      filter_step /= 2;
+      filt_direction = 0;
+    } else {
+      filt_direction = (filt_best < filt_mid) ? -1 : 1;
+      filt_mid = filt_best;
+    }
+  }
+
+  // Update best error
+  best_err = ss_err[filt_best];
+
+  if (best_cost_ret)
+    *best_cost_ret = RDCOST_DBL(x->rdmult, x->rddiv, 0, best_err);
+  return filt_best;
+}
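+
+/* Search sketch: starting from the previous frame's filter level L with step
+ * s (4 if L < 16, else L / 4), each pass probes L - s and L + s, re-centers
+ * on the winner (with the bias favoring lower levels), and halves s whenever
+ * the center wins; the loop stops once s reaches 0. */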
+
+#if !CONFIG_LOOP_RESTORATION
+void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                            LPF_PICK_METHOD method) {
+  VP10_COMMON *const cm = &cpi->common;
+  struct loopfilter *const lf = &cm->lf;
+
+  lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
+
+  if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) {
+    lf->filter_level = 0;
+  } else if (method >= LPF_PICK_FROM_Q) {
+    const int min_filter_level = 0;
+    const int max_filter_level = vp10_get_max_filter_level(cpi);
+    const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+// These values were determined by linear fitting the result of the
+// searched level, filt_guess = q * 0.316206 + 3.87252
+#if CONFIG_VP9_HIGHBITDEPTH
+    int filt_guess;
+    switch (cm->bit_depth) {
+      case VPX_BITS_8:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
+        break;
+      case VPX_BITS_10:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
+        break;
+      case VPX_BITS_12:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
+        break;
+      default:
+        assert(0 &&
+               "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
+               "or VPX_BITS_12");
+        return;
+    }
+#else
+    int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
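+    // In fixed point, 20723 == round(0.316206 * 2^16) and
+    // 1015158 == round(3.87252 * 2^18), so the 8-bit branch computes
+    // round((q / 4) * 0.316206 + 3.87252); the fitted q is presumably a
+    // quarter of the internal AC quantizer scale. The 10- and 12-bit
+    // constants and shifts are the same values scaled by 4 and 16, matching
+    // the wider high-bitdepth quantizer ranges.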
+    if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
+    lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
+  } else {
+    lf->filter_level = vp10_search_filter_level(
+        sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, NULL);
+  }
+
+#if CONFIG_EXT_TILE
+  // TODO(any): 0 loopfilter level is only necessary if individual tile
+  // decoding is required. We need to communicate this requirement to this
+  // code and force loop filter level 0 only if required.
+  lf->filter_level = 0;
+#endif  // CONFIG_EXT_TILE
+}
+#endif  // !CONFIG_LOOP_RESTORATION
diff --git a/av1/encoder/picklpf.h b/av1/encoder/picklpf.h
new file mode 100644
index 0000000..cd8afc6
--- /dev/null
+++ b/av1/encoder/picklpf.h
@@ -0,0 +1,31 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_PICKLPF_H_
+#define VP10_ENCODER_PICKLPF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "av1/encoder/encoder.h"
+
+struct yv12_buffer_config;
+struct VP10_COMP;
+int vp10_get_max_filter_level(const VP10_COMP *cpi);
+int vp10_search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                             int partial_frame, double *err);
+void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
+                            struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_PICKLPF_H_
diff --git a/av1/encoder/pickrst.c b/av1/encoder/pickrst.c
new file mode 100644
index 0000000..b6ee6f0
--- /dev/null
+++ b/av1/encoder/pickrst.c
@@ -0,0 +1,808 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include "./vpx_scale_rtcd.h"
+
+#include "aom_dsp/psnr.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/onyxc_int.h"
+#include "av1/common/quant_common.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/picklpf.h"
+#include "av1/encoder/pickrst.h"
+#include "av1/encoder/quantize.h"
+
+static int64_t try_restoration_frame(const YV12_BUFFER_CONFIG *sd,
+                                     VP10_COMP *const cpi, RestorationInfo *rsi,
+                                     int partial_frame) {
+  VP10_COMMON *const cm = &cpi->common;
+  int64_t filt_err;
+  vp10_loop_restoration_frame(cm->frame_to_show, cm, rsi, 1, partial_frame);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (cm->use_highbitdepth) {
+    filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show);
+  } else {
+    filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+  }
+#else
+  filt_err = vpx_get_y_sse(sd, cm->frame_to_show);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // Re-instate the unfiltered frame
+  vpx_yv12_copy_y(&cpi->last_frame_db, cm->frame_to_show);
+  return filt_err;
+}
+
+static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                                  int filter_level, int partial_frame,
+                                  int *bilateral_level, double *best_cost_ret) {
+  VP10_COMMON *const cm = &cpi->common;
+  int i, j, tile_idx;
+  int64_t err;
+  int bits;
+  double cost, best_cost, cost_norestore, cost_bilateral;
+  const int bilateral_level_bits = vp10_bilateral_level_bits(&cpi->common);
+  const int bilateral_levels = 1 << bilateral_level_bits;
+  MACROBLOCK *x = &cpi->td.mb;
+  RestorationInfo rsi;
+  const int ntiles =
+      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+
+  //  Make a copy of the unfiltered / processed recon buffer
+  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+                         1, partial_frame);
+  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+
+  // RD cost associated with no restoration
+  rsi.restoration_type = RESTORE_NONE;
+  err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
+  bits = 0;
+  cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
+                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+  best_cost = cost_norestore;
+
+  // RD cost associated with bilateral filtering
+  rsi.restoration_type = RESTORE_BILATERAL;
+  rsi.bilateral_level =
+      (int *)vpx_malloc(sizeof(*rsi.bilateral_level) * ntiles);
+  assert(rsi.bilateral_level != NULL);
+
+  for (j = 0; j < ntiles; ++j) bilateral_level[j] = -1;
+
+  // Find best filter for each tile
+  for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
+    for (j = 0; j < ntiles; ++j) rsi.bilateral_level[j] = -1;
+    best_cost = cost_norestore;
+    for (i = 0; i < bilateral_levels; ++i) {
+      rsi.bilateral_level[tile_idx] = i;
+      err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
+      bits = bilateral_level_bits + 1;
+      // When RDCOST is used, the rate is normally in units of bits * 256 and
+      // the distortion is the sum of squared errors * 64. Below, the two are
+      // scaled in the correct ratio to each other, though not by those exact
+      // values.
+      cost = RDCOST_DBL(x->rdmult, x->rddiv,
+                        (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+      if (cost < best_cost) {
+        bilateral_level[tile_idx] = i;
+        best_cost = cost;
+      }
+    }
+  }
+  // Find cost for combined configuration
+  bits = 0;
+  for (j = 0; j < ntiles; ++j) {
+    rsi.bilateral_level[j] = bilateral_level[j];
+    if (rsi.bilateral_level[j] >= 0) {
+      bits += (bilateral_level_bits + 1);
+    } else {
+      bits += 1;
+    }
+  }
+  err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
+  cost_bilateral = RDCOST_DBL(x->rdmult, x->rddiv,
+                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+
+  vpx_free(rsi.bilateral_level);
+
+  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+  if (cost_bilateral < cost_norestore) {
+    if (best_cost_ret) *best_cost_ret = cost_bilateral;
+    return 1;
+  } else {
+    if (best_cost_ret) *best_cost_ret = cost_norestore;
+    return 0;
+  }
+}
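+
+/* Cost model sketch: for each restoration tile the search tries every
+ * bilateral level and keeps the one whose RD cost (rate of signaling the
+ * level plus distortion) beats the no-restoration cost; the combined
+ * configuration is then re-evaluated once with all chosen levels in place. */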
+
+static int search_filter_bilateral_level(const YV12_BUFFER_CONFIG *sd,
+                                         VP10_COMP *cpi, int partial_frame,
+                                         int *filter_best, int *bilateral_level,
+                                         double *best_cost_ret) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const struct loopfilter *const lf = &cm->lf;
+  const int min_filter_level = 0;
+  const int max_filter_level = vp10_get_max_filter_level(cpi);
+  int filt_direction = 0;
+  int filt_best;
+  double best_err;
+  int i, j;
+  int *tmp_level;
+  int bilateral_success[MAX_LOOP_FILTER + 1];
+
+  const int ntiles =
+      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+
+  // Start the search at the previous frame filter level unless it is now out of
+  // range.
+  int filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level);
+  int filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
+  double ss_err[MAX_LOOP_FILTER + 1];
+  // Set each entry to -1
+  for (i = 0; i <= MAX_LOOP_FILTER; ++i) ss_err[i] = -1.0;
+
+  tmp_level = (int *)vpx_malloc(sizeof(*tmp_level) * ntiles);
+  assert(tmp_level != NULL);
+
+  bilateral_success[filt_mid] = search_bilateral_level(
+      sd, cpi, filt_mid, partial_frame, tmp_level, &best_err);
+  filt_best = filt_mid;
+  ss_err[filt_mid] = best_err;
+  for (j = 0; j < ntiles; ++j) {
+    bilateral_level[j] = tmp_level[j];
+  }
+
+  while (filter_step > 0) {
+    const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level);
+    const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level);
+
+    // Bias against raising loop filter in favor of lowering it.
+    double bias = (best_err / (1 << (15 - (filt_mid / 8)))) * filter_step;
+
+    if ((cpi->oxcf.pass == 2) && (cpi->twopass.section_intra_rating < 20))
+      bias = (bias * cpi->twopass.section_intra_rating) / 20;
+
+    // Bias less for larger transform sizes.
+    if (cm->tx_mode != ONLY_4X4) bias /= 2;
+
+    if (filt_direction <= 0 && filt_low != filt_mid) {
+      // Get Low filter error score
+      if (ss_err[filt_low] < 0) {
+        bilateral_success[filt_low] = search_bilateral_level(
+            sd, cpi, filt_low, partial_frame, tmp_level, &ss_err[filt_low]);
+      }
+      // If value is close to the best so far then bias towards a lower loop
+      // filter value.
+      if (ss_err[filt_low] < (best_err + bias)) {
+        // Was it actually better than the previous best?
+        if (ss_err[filt_low] < best_err) {
+          best_err = ss_err[filt_low];
+        }
+        filt_best = filt_low;
+        for (j = 0; j < ntiles; ++j) {
+          bilateral_level[j] = tmp_level[j];
+        }
+      }
+    }
+
+    // Now look at filt_high
+    if (filt_direction >= 0 && filt_high != filt_mid) {
+      if (ss_err[filt_high] < 0) {
+        bilateral_success[filt_high] = search_bilateral_level(
+            sd, cpi, filt_high, partial_frame, tmp_level, &ss_err[filt_high]);
+      }
+      // Only accept the higher filter value if it beats the previous best by
+      // more than the bias, i.e. the bias works against raising the filter
+      // value.
+      if (ss_err[filt_high] < (best_err - bias)) {
+        best_err = ss_err[filt_high];
+        filt_best = filt_high;
+        for (j = 0; j < ntiles; ++j) {
+          bilateral_level[j] = tmp_level[j];
+        }
+      }
+    }
+
+    // Half the step distance if the best filter value was the same as last time
+    if (filt_best == filt_mid) {
+      filter_step /= 2;
+      filt_direction = 0;
+    } else {
+      filt_direction = (filt_best < filt_mid) ? -1 : 1;
+      filt_mid = filt_best;
+    }
+  }
+
+  vpx_free(tmp_level);
+
+  // Update best error
+  best_err = ss_err[filt_best];
+
+  if (best_cost_ret) *best_cost_ret = best_err;
+  if (filter_best) *filter_best = filt_best;
+
+  return bilateral_success[filt_best];
+}
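+
+/* The search above follows a standard coarse-to-fine pattern: starting from
+ * the previous frame's filter level, probe one step below and one step above
+ * the current mid-point, move the mid-point to a winning probe (with a bias
+ * toward lower levels), and halve the step whenever the mid-point survives.
+ * As a sketch:
+ *
+ *   while (step > 0) {
+ *     evaluate mid - step and mid + step;
+ *     if (neither beats mid after the bias) step /= 2;
+ *     else mid = the winning probe;
+ *   }
+ */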
+
+static double find_average(uint8_t *src, int h_start, int h_end, int v_start,
+                           int v_end, int stride) {
+  uint64_t sum = 0;
+  double avg = 0;
+  int i, j;
+  for (i = v_start; i < v_end; i++)
+    for (j = h_start; j < h_end; j++) sum += src[i * stride + j];
+  avg = (double)sum / ((v_end - v_start) * (h_end - h_start));
+  return avg;
+}
+
+static void compute_stats(uint8_t *dgd, uint8_t *src, int h_start, int h_end,
+                          int v_start, int v_end, int dgd_stride,
+                          int src_stride, double *M, double *H) {
+  int i, j, k, l;
+  double Y[RESTORATION_WIN2];
+  const double avg =
+      find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  memset(M, 0, sizeof(*M) * RESTORATION_WIN2);
+  memset(H, 0, sizeof(*H) * RESTORATION_WIN2 * RESTORATION_WIN2);
+  for (i = v_start; i < v_end; i++) {
+    for (j = h_start; j < h_end; j++) {
+      const double X = (double)src[i * src_stride + j] - avg;
+      int idx = 0;
+      for (k = -RESTORATION_HALFWIN; k <= RESTORATION_HALFWIN; k++) {
+        for (l = -RESTORATION_HALFWIN; l <= RESTORATION_HALFWIN; l++) {
+          Y[idx] = (double)dgd[(i + l) * dgd_stride + (j + k)] - avg;
+          idx++;
+        }
+      }
+      for (k = 0; k < RESTORATION_WIN2; ++k) {
+        M[k] += Y[k] * X;
+        H[k * RESTORATION_WIN2 + k] += Y[k] * Y[k];
+        for (l = k + 1; l < RESTORATION_WIN2; ++l) {
+          double value = Y[k] * Y[l];
+          H[k * RESTORATION_WIN2 + l] += value;
+          H[l * RESTORATION_WIN2 + k] += value;
+        }
+      }
+    }
+  }
+}
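+
+/* The sums above are the usual normal-equation statistics for a linear
+ * (Wiener) estimator: with X the mean-removed source pixel and Y the vector
+ * of mean-removed degraded pixels in the window around it,
+ *   M[k]                        accumulates sum(Y[k] * X)    (cross-correlation)
+ *   H[k * RESTORATION_WIN2 + l] accumulates sum(Y[k] * Y[l]) (autocorrelation)
+ * so the unconstrained MMSE filter f would solve H * f = M. */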
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static double find_average_highbd(uint16_t *src, int h_start, int h_end,
+                                  int v_start, int v_end, int stride) {
+  uint64_t sum = 0;
+  double avg = 0;
+  int i, j;
+  for (i = v_start; i < v_end; i++)
+    for (j = h_start; j < h_end; j++) sum += src[i * stride + j];
+  avg = (double)sum / ((v_end - v_start) * (h_end - h_start));
+  return avg;
+}
+
+static void compute_stats_highbd(uint8_t *dgd8, uint8_t *src8, int h_start,
+                                 int h_end, int v_start, int v_end,
+                                 int dgd_stride, int src_stride, double *M,
+                                 double *H) {
+  int i, j, k, l;
+  double Y[RESTORATION_WIN2];
+  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
+  const double avg =
+      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  memset(M, 0, sizeof(*M) * RESTORATION_WIN2);
+  memset(H, 0, sizeof(*H) * RESTORATION_WIN2 * RESTORATION_WIN2);
+  for (i = v_start; i < v_end; i++) {
+    for (j = h_start; j < h_end; j++) {
+      const double X = (double)src[i * src_stride + j] - avg;
+      int idx = 0;
+      for (k = -RESTORATION_HALFWIN; k <= RESTORATION_HALFWIN; k++) {
+        for (l = -RESTORATION_HALFWIN; l <= RESTORATION_HALFWIN; l++) {
+          Y[idx] = (double)dgd[(i + l) * dgd_stride + (j + k)] - avg;
+          idx++;
+        }
+      }
+      for (k = 0; k < RESTORATION_WIN2; ++k) {
+        M[k] += Y[k] * X;
+        H[k * RESTORATION_WIN2 + k] += Y[k] * Y[k];
+        for (l = k + 1; l < RESTORATION_WIN2; ++l) {
+          double value = Y[k] * Y[l];
+          H[k * RESTORATION_WIN2 + l] += value;
+          H[l * RESTORATION_WIN2 + k] += value;
+        }
+      }
+    }
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+// Solves Ax = b, where x and b are column vectors
+static int linsolve(int n, double *A, int stride, double *b, double *x) {
+  int i, j, k;
+  double c;
+  // A single pass of adjacent row swaps on the first column (a rough form of
+  // partial pivoting).
+  for (i = n - 1; i > 0; i--) {
+    if (A[(i - 1) * stride] < A[i * stride]) {
+      for (j = 0; j < n; j++) {
+        c = A[i * stride + j];
+        A[i * stride + j] = A[(i - 1) * stride + j];
+        A[(i - 1) * stride + j] = c;
+      }
+      c = b[i];
+      b[i] = b[i - 1];
+      b[i - 1] = c;
+    }
+  }
+  // Forward elimination
+  for (k = 0; k < n - 1; k++) {
+    for (i = k; i < n - 1; i++) {
+      c = A[(i + 1) * stride + k] / A[k * stride + k];
+      for (j = 0; j < n; j++) A[(i + 1) * stride + j] -= c * A[k * stride + j];
+      b[i + 1] -= c * b[k];
+    }
+  }
+  // Backward substitution
+  for (i = n - 1; i >= 0; i--) {
+    if (fabs(A[i * stride + i]) < 1e-10) return 0;
+    c = 0;
+    for (j = i + 1; j <= n - 1; j++) c += A[i * stride + j] * x[j];
+    x[i] = (b[i] - c) / A[i * stride + i];
+  }
+  return 1;
+}
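+
+/* Illustrative usage, with hypothetical inputs: solve the 2x2 system
+ *   2*x0 + 1*x1 = 3
+ *   1*x0 + 3*x1 = 4
+ *
+ *   double A[4] = { 2, 1, 1, 3 }, b[2] = { 3, 4 }, x[2];
+ *   if (linsolve(2, A, 2, b, x)) {
+ *     // x[0] == 1.0, x[1] == 1.0
+ *   }
+ *
+ * Note that A and b are modified in place, so callers must pass copies if
+ * the originals are still needed. */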
+
+static INLINE int wrap_index(int i) {
+  return (i >= RESTORATION_HALFWIN1 ? RESTORATION_WIN - 1 - i : i);
+}
+
+// Fix vector b, update vector a
+static void update_a_sep_sym(double **Mc, double **Hc, double *a, double *b) {
+  int i, j;
+  double S[RESTORATION_WIN];
+  double A[RESTORATION_WIN], B[RESTORATION_WIN2];
+  int w, w2;
+  memset(A, 0, sizeof(A));
+  memset(B, 0, sizeof(B));
+  for (i = 0; i < RESTORATION_WIN; i++) {
+    for (j = 0; j < RESTORATION_WIN; ++j) {
+      const int jj = wrap_index(j);
+      A[jj] += Mc[i][j] * b[i];
+    }
+  }
+  for (i = 0; i < RESTORATION_WIN; i++) {
+    for (j = 0; j < RESTORATION_WIN; j++) {
+      int k, l;
+      for (k = 0; k < RESTORATION_WIN; ++k)
+        for (l = 0; l < RESTORATION_WIN; ++l) {
+          const int kk = wrap_index(k);
+          const int ll = wrap_index(l);
+          B[ll * RESTORATION_HALFWIN1 + kk] +=
+              Hc[j * RESTORATION_WIN + i][k * RESTORATION_WIN2 + l] * b[i] *
+              b[j];
+        }
+    }
+  }
+  // Enforce the normalization constraint (filter taps summing to one) within
+  // the system of equations itself.
+  w = RESTORATION_WIN;
+  w2 = (w >> 1) + 1;
+  for (i = 0; i < w2 - 1; ++i)
+    A[i] -=
+        A[w2 - 1] * 2 + B[i * w2 + w2 - 1] - 2 * B[(w2 - 1) * w2 + (w2 - 1)];
+  for (i = 0; i < w2 - 1; ++i)
+    for (j = 0; j < w2 - 1; ++j)
+      B[i * w2 + j] -= 2 * (B[i * w2 + (w2 - 1)] + B[(w2 - 1) * w2 + j] -
+                            2 * B[(w2 - 1) * w2 + (w2 - 1)]);
+  if (linsolve(w2 - 1, B, w2, A, S)) {
+    S[w2 - 1] = 1.0;
+    for (i = w2; i < w; ++i) {
+      S[i] = S[w - 1 - i];
+      S[w2 - 1] -= 2 * S[i];
+    }
+    memcpy(a, S, w * sizeof(*a));
+  }
+}
+
+// Fix vector a, update vector b
+static void update_b_sep_sym(double **Mc, double **Hc, double *a, double *b) {
+  int i, j;
+  double S[RESTORATION_WIN];
+  double A[RESTORATION_WIN], B[RESTORATION_WIN2];
+  int w, w2;
+  memset(A, 0, sizeof(A));
+  memset(B, 0, sizeof(B));
+  for (i = 0; i < RESTORATION_WIN; i++) {
+    const int ii = wrap_index(i);
+    for (j = 0; j < RESTORATION_WIN; j++) A[ii] += Mc[i][j] * a[j];
+  }
+
+  for (i = 0; i < RESTORATION_WIN; i++) {
+    for (j = 0; j < RESTORATION_WIN; j++) {
+      const int ii = wrap_index(i);
+      const int jj = wrap_index(j);
+      int k, l;
+      for (k = 0; k < RESTORATION_WIN; ++k)
+        for (l = 0; l < RESTORATION_WIN; ++l)
+          B[jj * RESTORATION_HALFWIN1 + ii] +=
+              Hc[i * RESTORATION_WIN + j][k * RESTORATION_WIN2 + l] * a[k] *
+              a[l];
+    }
+  }
+  // Enforce the normalization constraint (filter taps summing to one) within
+  // the system of equations itself.
+  w = RESTORATION_WIN;
+  w2 = RESTORATION_HALFWIN1;
+  for (i = 0; i < w2 - 1; ++i)
+    A[i] -=
+        A[w2 - 1] * 2 + B[i * w2 + w2 - 1] - 2 * B[(w2 - 1) * w2 + (w2 - 1)];
+  for (i = 0; i < w2 - 1; ++i)
+    for (j = 0; j < w2 - 1; ++j)
+      B[i * w2 + j] -= 2 * (B[i * w2 + (w2 - 1)] + B[(w2 - 1) * w2 + j] -
+                            2 * B[(w2 - 1) * w2 + (w2 - 1)]);
+  if (linsolve(w2 - 1, B, w2, A, S)) {
+    S[w2 - 1] = 1.0;
+    for (i = w2; i < w; ++i) {
+      S[i] = S[w - 1 - i];
+      S[w2 - 1] -= 2 * S[i];
+    }
+    memcpy(b, S, w * sizeof(*b));
+  }
+}
+
+static int wiener_decompose_sep_sym(double *M, double *H, double *a,
+                                    double *b) {
+  static const double init_filt[RESTORATION_WIN] = {
+    0.035623, -0.127154, 0.211436, 0.760190, 0.211436, -0.127154, 0.035623,
+  };
+  int i, j, iter;
+  double *Hc[RESTORATION_WIN2];
+  double *Mc[RESTORATION_WIN];
+  for (i = 0; i < RESTORATION_WIN; i++) {
+    Mc[i] = M + i * RESTORATION_WIN;
+    for (j = 0; j < RESTORATION_WIN; j++) {
+      Hc[i * RESTORATION_WIN + j] =
+          H + i * RESTORATION_WIN * RESTORATION_WIN2 + j * RESTORATION_WIN;
+    }
+  }
+  memcpy(a, init_filt, sizeof(*a) * RESTORATION_WIN);
+  memcpy(b, init_filt, sizeof(*b) * RESTORATION_WIN);
+
+  iter = 1;
+  while (iter < 10) {
+    update_a_sep_sym(Mc, Hc, a, b);
+    update_b_sep_sym(Mc, Hc, a, b);
+    iter++;
+  }
+  return 1;
+}
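+
+/* The decomposition above is alternating minimization (coordinate descent):
+ * with the horizontal taps b held fixed, the best vertical taps a solve a
+ * small linear system, and vice versa, so alternating the two updates is
+ * non-increasing in the separable least-squares error. A fixed iteration
+ * count is used rather than a convergence test. */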
+
+// Computes the function x'*H*x - 2*x'*M for the learned filters and compares
+// it against the identity filter; the final score is the difference between
+// the two function values, so a negative score favors the learned filters.
+static double compute_score(double *M, double *H, int *vfilt, int *hfilt) {
+  double ab[RESTORATION_WIN * RESTORATION_WIN];
+  int i, k, l;
+  double P = 0, Q = 0;
+  double iP = 0, iQ = 0;
+  double Score, iScore;
+  int w;
+  double a[RESTORATION_WIN], b[RESTORATION_WIN];
+  w = RESTORATION_WIN;
+  a[RESTORATION_HALFWIN] = b[RESTORATION_HALFWIN] = 1.0;
+  for (i = 0; i < RESTORATION_HALFWIN; ++i) {
+    a[i] = a[RESTORATION_WIN - i - 1] =
+        (double)vfilt[i] / RESTORATION_FILT_STEP;
+    b[i] = b[RESTORATION_WIN - i - 1] =
+        (double)hfilt[i] / RESTORATION_FILT_STEP;
+    a[RESTORATION_HALFWIN] -= 2 * a[i];
+    b[RESTORATION_HALFWIN] -= 2 * b[i];
+  }
+  for (k = 0; k < w; ++k) {
+    for (l = 0; l < w; ++l) {
+      ab[k * w + l] = a[l] * b[k];
+    }
+  }
+  for (k = 0; k < w * w; ++k) {
+    P += ab[k] * M[k];
+    for (l = 0; l < w * w; ++l) Q += ab[k] * H[k * w * w + l] * ab[l];
+  }
+  Score = Q - 2 * P;
+
+  iP = M[(w * w) >> 1];
+  iQ = H[((w * w) >> 1) * w * w + ((w * w) >> 1)];
+  iScore = iQ - 2 * iP;
+
+  return Score - iScore;
+}
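+
+/* Rationale for the score: minimizing E[(X - f'Y)^2] over filters f expands
+ * to f'*H*f - 2*f'*M + E[X^2], so up to the constant E[X^2], Q - 2*P is the
+ * residual error of the candidate filter and iQ - 2*iP that of the identity
+ * filter; a negative difference means the candidate reduces the error. */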
+
+#define CLIP(x, lo, hi) ((x) < (lo) ? (lo) : (x) > (hi) ? (hi) : (x))
+#define RINT(x) ((x) < 0 ? (int)((x)-0.5) : (int)((x) + 0.5))
+
+static void quantize_sym_filter(double *f, int *fi) {
+  int i;
+  for (i = 0; i < RESTORATION_HALFWIN; ++i) {
+    fi[i] = RINT(f[i] * RESTORATION_FILT_STEP);
+  }
+  // Specialize for 7-tap filter
+  fi[0] = CLIP(fi[0], WIENER_FILT_TAP0_MINV, WIENER_FILT_TAP0_MAXV);
+  fi[1] = CLIP(fi[1], WIENER_FILT_TAP1_MINV, WIENER_FILT_TAP1_MAXV);
+  fi[2] = CLIP(fi[2], WIENER_FILT_TAP2_MINV, WIENER_FILT_TAP2_MAXV);
+}
+
+static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, VP10_COMP *cpi,
+                                int filter_level, int partial_frame,
+                                int (*vfilter)[RESTORATION_HALFWIN],
+                                int (*hfilter)[RESTORATION_HALFWIN],
+                                int *process_tile, double *best_cost_ret) {
+  VP10_COMMON *const cm = &cpi->common;
+  RestorationInfo rsi;
+  int64_t err;
+  int bits;
+  double cost_wiener, cost_norestore;
+  MACROBLOCK *x = &cpi->td.mb;
+  double M[RESTORATION_WIN2];
+  double H[RESTORATION_WIN2 * RESTORATION_WIN2];
+  double vfilterd[RESTORATION_WIN], hfilterd[RESTORATION_WIN];
+  const YV12_BUFFER_CONFIG *dgd = cm->frame_to_show;
+  const int width = cm->width;
+  const int height = cm->height;
+  const int src_stride = src->y_stride;
+  const int dgd_stride = dgd->y_stride;
+  double score;
+  int tile_idx, htile_idx, vtile_idx, tile_width, tile_height, nhtiles, nvtiles;
+  int h_start, h_end, v_start, v_end;
+  int i, j;
+
+  const int tilesize = WIENER_TILESIZE;
+  const int ntiles = vp10_get_restoration_ntiles(tilesize, width, height);
+
+  assert(width == dgd->y_crop_width);
+  assert(height == dgd->y_crop_height);
+  assert(width == src->y_crop_width);
+  assert(height == src->y_crop_height);
+
+  vp10_get_restoration_tile_size(tilesize, width, height, &tile_width,
+                                 &tile_height, &nhtiles, &nvtiles);
+
+  //  Make a copy of the unfiltered / processed recon buffer
+  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf);
+  vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filter_level,
+                         1, partial_frame);
+  vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_db);
+
+  rsi.restoration_type = RESTORE_NONE;
+  err = try_restoration_frame(src, cpi, &rsi, partial_frame);
+  bits = 0;
+  cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
+                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+
+  rsi.restoration_type = RESTORE_WIENER;
+  rsi.vfilter =
+      (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.vfilter) * ntiles);
+  assert(rsi.vfilter != NULL);
+  rsi.hfilter =
+      (int(*)[RESTORATION_HALFWIN])vpx_malloc(sizeof(*rsi.hfilter) * ntiles);
+  assert(rsi.hfilter != NULL);
+  rsi.wiener_level = (int *)vpx_malloc(sizeof(*rsi.wiener_level) * ntiles);
+  assert(rsi.wiener_level != NULL);
+
+  // Compute best Wiener filters for each tile
+  for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
+    htile_idx = tile_idx % nhtiles;
+    vtile_idx = tile_idx / nhtiles;
+    h_start =
+        htile_idx * tile_width + ((htile_idx > 0) ? 0 : RESTORATION_HALFWIN);
+    h_end = (htile_idx < nhtiles - 1) ? ((htile_idx + 1) * tile_width)
+                                      : (width - RESTORATION_HALFWIN);
+    v_start =
+        vtile_idx * tile_height + ((vtile_idx > 0) ? 0 : RESTORATION_HALFWIN);
+    v_end = (vtile_idx < nvtiles - 1) ? ((vtile_idx + 1) * tile_height)
+                                      : (height - RESTORATION_HALFWIN);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (cm->use_highbitdepth)
+      compute_stats_highbd(dgd->y_buffer, src->y_buffer, h_start, h_end,
+                           v_start, v_end, dgd_stride, src_stride, M, H);
+    else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      compute_stats(dgd->y_buffer, src->y_buffer, h_start, h_end, v_start,
+                    v_end, dgd_stride, src_stride, M, H);
+
+    if (!wiener_decompose_sep_sym(M, H, vfilterd, hfilterd)) {
+      for (i = 0; i < RESTORATION_HALFWIN; ++i)
+        rsi.vfilter[tile_idx][i] = rsi.hfilter[tile_idx][i] = 0;
+      process_tile[tile_idx] = 0;
+      continue;
+    }
+    quantize_sym_filter(vfilterd, rsi.vfilter[tile_idx]);
+    quantize_sym_filter(hfilterd, rsi.hfilter[tile_idx]);
+    process_tile[tile_idx] = 1;
+
+    // The filter score computes the value of the function x'*H*x - 2*x'*M for
+    // the learned filter and compares it against the identity filter. If
+    // there is no reduction in the function value, the filter is reverted to
+    // the identity.
+    score = compute_score(M, H, rsi.vfilter[tile_idx], rsi.hfilter[tile_idx]);
+    if (score > 0.0) {
+      for (i = 0; i < RESTORATION_HALFWIN; ++i)
+        rsi.vfilter[tile_idx][i] = rsi.hfilter[tile_idx][i] = 0;
+      process_tile[tile_idx] = 0;
+      continue;
+    }
+
+    for (j = 0; j < ntiles; ++j) rsi.wiener_level[j] = 0;
+    rsi.wiener_level[tile_idx] = 1;
+
+    err = try_restoration_frame(src, cpi, &rsi, partial_frame);
+    bits = 1 + WIENER_FILT_BITS;
+    cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
+                             (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+    if (cost_wiener >= cost_norestore) process_tile[tile_idx] = 0;
+  }
+  // Cost for Wiener filtering
+  bits = 0;
+  for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
+    bits += (process_tile[tile_idx] ? (WIENER_FILT_BITS + 1) : 1);
+    rsi.wiener_level[tile_idx] = process_tile[tile_idx];
+  }
+  err = try_restoration_frame(src, cpi, &rsi, partial_frame);
+  cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
+                           (bits << (VP10_PROB_COST_SHIFT - 4)), err);
+
+  for (tile_idx = 0; tile_idx < ntiles; ++tile_idx) {
+    if (process_tile[tile_idx] == 0) continue;
+    for (i = 0; i < RESTORATION_HALFWIN; ++i) {
+      vfilter[tile_idx][i] = rsi.vfilter[tile_idx][i];
+      hfilter[tile_idx][i] = rsi.hfilter[tile_idx][i];
+    }
+  }
+
+  vpx_free(rsi.vfilter);
+  vpx_free(rsi.hfilter);
+  vpx_free(rsi.wiener_level);
+
+  vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
+  if (cost_wiener < cost_norestore) {
+    if (best_cost_ret) *best_cost_ret = cost_wiener;
+    return 1;
+  } else {
+    if (best_cost_ret) *best_cost_ret = cost_norestore;
+    return 0;
+  }
+}
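+
+/* Summary of the flow above: per tile, (1) gather the M/H statistics,
+ * (2) derive a separable filter by alternating minimization, (3) quantize and
+ * clip the taps, and (4) keep the filter only if both the analytic score and
+ * the measured RD cost improve on no restoration; finally the combined
+ * configuration is re-costed against RESTORE_NONE for the frame-level
+ * decision. */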
+
+void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                                  LPF_PICK_METHOD method) {
+  VP10_COMMON *const cm = &cpi->common;
+  struct loopfilter *const lf = &cm->lf;
+  int wiener_success = 0;
+  int bilateral_success = 0;
+  double cost_bilateral = DBL_MAX;
+  double cost_wiener = DBL_MAX;
+  double cost_norestore = DBL_MAX;
+  int ntiles;
+
+  ntiles =
+      vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+  cm->rst_info.bilateral_level =
+      (int *)vpx_realloc(cm->rst_info.bilateral_level,
+                         sizeof(*cm->rst_info.bilateral_level) * ntiles);
+  assert(cm->rst_info.bilateral_level != NULL);
+
+  ntiles = vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+  cm->rst_info.wiener_level = (int *)vpx_realloc(
+      cm->rst_info.wiener_level, sizeof(*cm->rst_info.wiener_level) * ntiles);
+  assert(cm->rst_info.wiener_level != NULL);
+  cm->rst_info.vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+      cm->rst_info.vfilter, sizeof(*cm->rst_info.vfilter) * ntiles);
+  assert(cm->rst_info.vfilter != NULL);
+  cm->rst_info.hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+      cm->rst_info.hfilter, sizeof(*cm->rst_info.hfilter) * ntiles);
+  assert(cm->rst_info.hfilter != NULL);
+
+  lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
+
+  if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) {
+    lf->filter_level = 0;
+    cm->rst_info.restoration_type = RESTORE_NONE;
+  } else if (method >= LPF_PICK_FROM_Q) {
+    const int min_filter_level = 0;
+    const int max_filter_level = vp10_get_max_filter_level(cpi);
+    const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+// These values were determined by fitting a line to the filter levels found
+// by the full search: filt_guess = q * 0.316206 + 3.87252
+#if CONFIG_VP9_HIGHBITDEPTH
+    int filt_guess;
+    switch (cm->bit_depth) {
+      case VPX_BITS_8:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
+        break;
+      case VPX_BITS_10:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20);
+        break;
+      case VPX_BITS_12:
+        filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
+        break;
+      default:
+        assert(0 &&
+               "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
+               "or VPX_BITS_12");
+        return;
+    }
+#else
+    int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
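+    // As a rough sanity check on the fixed-point constants (assuming the q
+    // used here is 4x the quantizer variable in the fit): for 8-bit,
+    // 20723 / 2^18 ~= 0.0791 ~= 0.316206 / 4 and 1015158 / 2^18 ~= 3.8725;
+    // the 10- and 12-bit variants shift by 2 extra bits per step, matching
+    // the 4x growth of q per bit-depth increase.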
+    if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
+    lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
+    bilateral_success = search_bilateral_level(
+        sd, cpi, lf->filter_level, method == LPF_PICK_FROM_SUBIMAGE,
+        cm->rst_info.bilateral_level, &cost_bilateral);
+    wiener_success = search_wiener_filter(
+        sd, cpi, lf->filter_level, method == LPF_PICK_FROM_SUBIMAGE,
+        cm->rst_info.vfilter, cm->rst_info.hfilter, cm->rst_info.wiener_level,
+        &cost_wiener);
+    if (cost_bilateral < cost_wiener) {
+      if (bilateral_success)
+        cm->rst_info.restoration_type = RESTORE_BILATERAL;
+      else
+        cm->rst_info.restoration_type = RESTORE_NONE;
+    } else {
+      if (wiener_success)
+        cm->rst_info.restoration_type = RESTORE_WIENER;
+      else
+        cm->rst_info.restoration_type = RESTORE_NONE;
+    }
+  } else {
+    int blf_filter_level = -1;
+    bilateral_success = search_filter_bilateral_level(
+        sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &blf_filter_level,
+        cm->rst_info.bilateral_level, &cost_bilateral);
+    lf->filter_level = vp10_search_filter_level(
+        sd, cpi, method == LPF_PICK_FROM_SUBIMAGE, &cost_norestore);
+    wiener_success = search_wiener_filter(
+        sd, cpi, lf->filter_level, method == LPF_PICK_FROM_SUBIMAGE,
+        cm->rst_info.vfilter, cm->rst_info.hfilter, cm->rst_info.wiener_level,
+        &cost_wiener);
+    if (cost_bilateral < cost_wiener) {
+      lf->filter_level = blf_filter_level;
+      if (bilateral_success)
+        cm->rst_info.restoration_type = RESTORE_BILATERAL;
+      else
+        cm->rst_info.restoration_type = RESTORE_NONE;
+    } else {
+      if (wiener_success)
+        cm->rst_info.restoration_type = RESTORE_WIENER;
+      else
+        cm->rst_info.restoration_type = RESTORE_NONE;
+    }
+  }
+  if (cm->rst_info.restoration_type != RESTORE_BILATERAL) {
+    vpx_free(cm->rst_info.bilateral_level);
+    cm->rst_info.bilateral_level = NULL;
+  }
+  if (cm->rst_info.restoration_type != RESTORE_WIENER) {
+    vpx_free(cm->rst_info.vfilter);
+    cm->rst_info.vfilter = NULL;
+    vpx_free(cm->rst_info.hfilter);
+    cm->rst_info.hfilter = NULL;
+    vpx_free(cm->rst_info.wiener_level);
+    cm->rst_info.wiener_level = NULL;
+  }
+}
diff --git a/av1/encoder/pickrst.h b/av1/encoder/pickrst.h
new file mode 100644
index 0000000..6d94cef
--- /dev/null
+++ b/av1/encoder/pickrst.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_PICKRST_H_
+#define VP10_ENCODER_PICKRST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "av1/encoder/encoder.h"
+
+struct yv12_buffer_config;
+struct VP10_COMP;
+
+void vp10_pick_filter_restoration(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+                                  LPF_PICK_METHOD method);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_PICKRST_H_
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
new file mode 100644
index 0000000..ed8a04b
--- /dev/null
+++ b/av1/encoder/quantize.c
@@ -0,0 +1,1260 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "./vpx_dsp_rtcd.h"
+#include "aom_dsp/quantize.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+
+#include "av1/common/quant_common.h"
+#include "av1/common/scan.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/rd.h"
+
+#if CONFIG_NEW_QUANT
+static INLINE int quantize_coeff_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t quant_shift,
+    const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int tmp = clamp(abs_coeff, INT16_MIN, INT16_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < cuml_bins_ptr[i]) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    tmp -= cuml_bins_ptr[NUQ_KNOTS - 1];
+    q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
+  }
+  if (q) {
+    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
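+
+/* Illustrative behavior, with hypothetical numbers: cuml_bins_ptr holds the
+ * cumulative boundaries of the first NUQ_KNOTS non-uniform bins. If, say,
+ * NUQ_KNOTS were 3 with boundaries { 60, 120, 180 }, then |coeff| == 100
+ * would land in bin 1 (q = 1), while |coeff| == 200 would fall through all
+ * knots and the excess 200 - 180 = 20 would be quantized by the uniform
+ * quant/quant_shift tail. */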
+
+static INLINE int quantize_coeff_bigtx_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t quant_shift,
+    const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, int logsizeby32) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int tmp = clamp(abs_coeff, INT16_MIN, INT16_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < ROUND_POWER_OF_TWO(cuml_bins_ptr[i], 1 + logsizeby32)) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    tmp -= ROUND_POWER_OF_TWO(cuml_bins_ptr[NUQ_KNOTS - 1], 1 + logsizeby32);
+    q = NUQ_KNOTS +
+        (((((tmp * quant) >> 16) + tmp) * quant_shift) >> (15 - logsizeby32));
+  }
+  if (q) {
+    *dqcoeff_ptr = ROUND_POWER_OF_TWO(
+        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+    // (1 + logsizeby32);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+static INLINE int quantize_coeff_fp_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int tmp = clamp(abs_coeff, INT16_MIN, INT16_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < cuml_bins_ptr[i]) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    q = NUQ_KNOTS +
+        ((((int64_t)tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
+  }
+  if (q) {
+    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+static INLINE int quantize_coeff_bigtx_fp_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, int logsizeby32) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int tmp = clamp(abs_coeff, INT16_MIN, INT16_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < ROUND_POWER_OF_TWO(cuml_bins_ptr[i], 1 + logsizeby32)) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    q = NUQ_KNOTS +
+        ((((int64_t)tmp -
+           ROUND_POWER_OF_TWO(cuml_bins_ptr[NUQ_KNOTS - 1], 1 + logsizeby32)) *
+          quant) >>
+         (15 - logsizeby32));
+  }
+  if (q) {
+    *dqcoeff_ptr = ROUND_POWER_OF_TWO(
+        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    // *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val) >>
+    // (1 + logsizeby32);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+void quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                     int skip_block, const int16_t quant,
+                     const int16_t quant_shift, const int16_t dequant,
+                     const tran_low_t *cuml_bins_ptr,
+                     const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+                     tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (quantize_coeff_nuq(coeff_ptr[rc], quant, quant_shift, dequant,
+                           cuml_bins_ptr, dequant_val, qcoeff_ptr, dqcoeff_ptr))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_dc_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                        int skip_block, const int16_t quant,
+                        const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+                        const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (quantize_coeff_fp_nuq(coeff_ptr[rc], quant, dequant, cuml_bins_ptr,
+                              dequant_val, qcoeff_ptr, dqcoeff_ptr))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_dc_32x32_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                           int skip_block, const int16_t quant,
+                           const int16_t quant_shift, const int16_t dequant,
+                           const tran_low_t *cuml_bins_ptr,
+                           const tran_low_t *dequant_val,
+                           tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                           uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (quantize_coeff_bigtx_nuq(coeff_ptr[rc], quant, quant_shift, dequant,
+                                 cuml_bins_ptr, dequant_val, qcoeff_ptr,
+                                 dqcoeff_ptr, 0))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_dc_32x32_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                              int skip_block, const int16_t quant,
+                              const int16_t dequant,
+                              const tran_low_t *cuml_bins_ptr,
+                              const tran_low_t *dequant_val,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (quantize_coeff_bigtx_fp_nuq(coeff_ptr[rc], quant, dequant,
+                                    cuml_bins_ptr, dequant_val, qcoeff_ptr,
+                                    dqcoeff_ptr, 0))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                    int skip_block, const int16_t *quant_ptr,
+                    const int16_t *quant_shift_ptr, const int16_t *dequant_ptr,
+                    const cuml_bins_type_nuq *cuml_bins_ptr,
+                    const dequant_val_type_nuq *dequant_val,
+                    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                    uint16_t *eob_ptr, const int16_t *scan,
+                    const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (quantize_coeff_nuq(coeff_ptr[rc], quant_ptr[rc != 0],
+                             quant_shift_ptr[rc != 0], dequant_ptr[rc != 0],
+                             cuml_bins_ptr[band[i]], dequant_val[band[i]],
+                             &qcoeff_ptr[rc], &dqcoeff_ptr[rc]))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_fp_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                       int skip_block, const int16_t *quant_ptr,
+                       const int16_t *dequant_ptr,
+                       const cuml_bins_type_nuq *cuml_bins_ptr,
+                       const dequant_val_type_nuq *dequant_val,
+                       tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                       uint16_t *eob_ptr, const int16_t *scan,
+                       const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (quantize_coeff_fp_nuq(coeff_ptr[rc], quant_ptr[rc != 0],
+                                dequant_ptr[rc != 0], cuml_bins_ptr[band[i]],
+                                dequant_val[band[i]], &qcoeff_ptr[rc],
+                                &dqcoeff_ptr[rc]))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_32x32_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          int skip_block, const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          const int16_t *dequant_ptr,
+                          const cuml_bins_type_nuq *cuml_bins_ptr,
+                          const dequant_val_type_nuq *dequant_val,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          uint16_t *eob_ptr, const int16_t *scan,
+                          const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (quantize_coeff_bigtx_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], quant_shift_ptr[rc != 0],
+              dequant_ptr[rc != 0], cuml_bins_ptr[band[i]],
+              dequant_val[band[i]], &qcoeff_ptr[rc], &dqcoeff_ptr[rc], 0))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void quantize_32x32_fp_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             int skip_block, const int16_t *quant_ptr,
+                             const int16_t *dequant_ptr,
+                             const cuml_bins_type_nuq *cuml_bins_ptr,
+                             const dequant_val_type_nuq *dequant_val,
+                             tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                             uint16_t *eob_ptr, const int16_t *scan,
+                             const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (quantize_coeff_bigtx_fp_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], dequant_ptr[rc != 0],
+              cuml_bins_ptr[band[i]], dequant_val[band[i]], &qcoeff_ptr[rc],
+              &dqcoeff_ptr[rc], 0))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+#endif  // CONFIG_NEW_QUANT
+
+void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  *eob_ptr = 0;
+}
+
+void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                             const MACROBLOCKD_PLANE *pd,
+                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                             const scan_order *sc, const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  if (qparam->log_scale == 0) {
+    vp10_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+                     p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                     pd->dequant, eob_ptr, sc->scan, sc->iscan);
+  } else {
+    vp10_quantize_fp_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin,
+                           p->round_fp, p->quant_fp, p->quant_shift, qcoeff_ptr,
+                           dqcoeff_ptr, pd->dequant, eob_ptr, sc->scan,
+                           sc->iscan);
+  }
+}
+
+void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                            const MACROBLOCKD_PLANE *pd,
+                            tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                            const scan_order *sc, const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  if (qparam->log_scale == 0) {
+    vpx_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round, p->quant,
+                   p->quant_shift, qcoeff_ptr, dqcoeff_ptr, pd->dequant,
+                   eob_ptr, sc->scan, sc->iscan);
+  } else {
+    vpx_quantize_b_32x32(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+                         p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                         pd->dequant, eob_ptr, sc->scan, sc->iscan);
+  }
+}
+
+void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                             const MACROBLOCKD_PLANE *pd,
+                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                             const scan_order *sc, const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+  (void)sc;
+  if (qparam->log_scale == 0) {
+    vpx_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+                    p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr, pd->dequant[0],
+                    eob_ptr);
+  } else {
+    vpx_quantize_dc_32x32(coeff_ptr, skip_block, p->round, p->quant_fp[0],
+                          qcoeff_ptr, dqcoeff_ptr, pd->dequant[0], eob_ptr);
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_quantize_fp_facade(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
+    const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  vp10_highbd_quantize_fp(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round_fp,
+                          p->quant_fp, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                          pd->dequant, eob_ptr, sc->scan, sc->iscan,
+                          qparam->log_scale);
+}
+
+void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+                                   intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                   tran_low_t *qcoeff_ptr,
+                                   const MACROBLOCKD_PLANE *pd,
+                                   tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                   const scan_order *sc,
+                                   const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  vp10_highbd_quantize_b(coeff_ptr, n_coeffs, skip_block, p->zbin, p->round,
+                         p->quant, p->quant_shift, qcoeff_ptr, dqcoeff_ptr,
+                         pd->dequant, eob_ptr, sc->scan, sc->iscan,
+                         qparam->log_scale);
+}
+
+void vp10_highbd_quantize_dc_facade(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
+    const QUANT_PARAM *qparam) {
+  // obsolete skip_block
+  const int skip_block = 0;
+
+  (void)sc;
+
+  vp10_highbd_quantize_dc(coeff_ptr, (int)n_coeffs, skip_block, p->round,
+                          p->quant_fp[0], qcoeff_ptr, dqcoeff_ptr,
+                          pd->dequant[0], eob_ptr, qparam->log_scale);
+}
+
+#if CONFIG_NEW_QUANT
+static INLINE int highbd_quantize_coeff_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t quant_shift,
+    const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int64_t tmp = clamp(abs_coeff, INT32_MIN, INT32_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < cuml_bins_ptr[i]) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    tmp -= cuml_bins_ptr[NUQ_KNOTS - 1];
+    q = NUQ_KNOTS + (((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16);
+  }
+  if (q) {
+    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+static INLINE int highbd_quantize_coeff_fp_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int64_t tmp = clamp(abs_coeff, INT32_MIN, INT32_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < cuml_bins_ptr[i]) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    q = NUQ_KNOTS + (((tmp - cuml_bins_ptr[NUQ_KNOTS - 1]) * quant) >> 16);
+  }
+  if (q) {
+    *dqcoeff_ptr = vp10_dequant_abscoeff_nuq(q, dequant, dequant_val);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+static INLINE int highbd_quantize_coeff_bigtx_fp_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, int logsizeby32) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int64_t tmp = clamp(abs_coeff, INT32_MIN, INT32_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < ROUND_POWER_OF_TWO(cuml_bins_ptr[i], 1 + logsizeby32)) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    q = NUQ_KNOTS + (((tmp - ROUND_POWER_OF_TWO(cuml_bins_ptr[NUQ_KNOTS - 1],
+                                                1 + logsizeby32)) *
+                      quant) >>
+                     (15 - logsizeby32));
+  }
+  if (q) {
+    *dqcoeff_ptr = ROUND_POWER_OF_TWO(
+        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+static INLINE int highbd_quantize_coeff_bigtx_nuq(
+    const tran_low_t coeffv, const int16_t quant, const int16_t quant_shift,
+    const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, int logsizeby32) {
+  const int coeff = coeffv;
+  const int coeff_sign = (coeff >> 31);
+  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+  int i, q;
+  int64_t tmp = clamp(abs_coeff, INT32_MIN, INT32_MAX);
+  for (i = 0; i < NUQ_KNOTS; i++) {
+    if (tmp < ROUND_POWER_OF_TWO(cuml_bins_ptr[i], 1 + logsizeby32)) {
+      q = i;
+      break;
+    }
+  }
+  if (i == NUQ_KNOTS) {
+    tmp -= ROUND_POWER_OF_TWO(cuml_bins_ptr[NUQ_KNOTS - 1], 1 + logsizeby32);
+    q = NUQ_KNOTS +
+        (((((tmp * quant) >> 16) + tmp) * quant_shift) >> (15 - logsizeby32));
+  }
+  if (q) {
+    *dqcoeff_ptr = ROUND_POWER_OF_TWO(
+        vp10_dequant_abscoeff_nuq(q, dequant, dequant_val), 1 + logsizeby32);
+    *qcoeff_ptr = (q ^ coeff_sign) - coeff_sign;
+    *dqcoeff_ptr = *qcoeff_ptr < 0 ? -*dqcoeff_ptr : *dqcoeff_ptr;
+  } else {
+    *qcoeff_ptr = 0;
+    *dqcoeff_ptr = 0;
+  }
+  return (q != 0);
+}
+
+void highbd_quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            int skip_block, const int16_t quant,
+                            const int16_t quant_shift, const int16_t dequant,
+                            const tran_low_t *cuml_bins_ptr,
+                            const tran_low_t *dequant_val,
+                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                            uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (highbd_quantize_coeff_nuq(coeff_ptr[rc], quant, quant_shift, dequant,
+                                  cuml_bins_ptr, dequant_val, qcoeff_ptr,
+                                  dqcoeff_ptr))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_dc_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                               int skip_block, const int16_t quant,
+                               const int16_t dequant,
+                               const tran_low_t *cuml_bins_ptr,
+                               const tran_low_t *dequant_val,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (highbd_quantize_coeff_fp_nuq(coeff_ptr[rc], quant, dequant,
+                                     cuml_bins_ptr, dequant_val, qcoeff_ptr,
+                                     dqcoeff_ptr))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                           int skip_block, const int16_t *quant_ptr,
+                           const int16_t *quant_shift_ptr,
+                           const int16_t *dequant_ptr,
+                           const cuml_bins_type_nuq *cuml_bins_ptr,
+                           const dequant_val_type_nuq *dequant_val,
+                           tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                           uint16_t *eob_ptr, const int16_t *scan,
+                           const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (highbd_quantize_coeff_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], quant_shift_ptr[rc != 0],
+              dequant_ptr[rc != 0], cuml_bins_ptr[band[i]],
+              dequant_val[band[i]], &qcoeff_ptr[rc], &dqcoeff_ptr[rc]))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_32x32_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                                 int skip_block, const int16_t *quant_ptr,
+                                 const int16_t *quant_shift_ptr,
+                                 const int16_t *dequant_ptr,
+                                 const cuml_bins_type_nuq *cuml_bins_ptr,
+                                 const dequant_val_type_nuq *dequant_val,
+                                 tran_low_t *qcoeff_ptr,
+                                 tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                 const int16_t *scan, const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (highbd_quantize_coeff_bigtx_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], quant_shift_ptr[rc != 0],
+              dequant_ptr[rc != 0], cuml_bins_ptr[band[i]],
+              dequant_val[band[i]], &qcoeff_ptr[rc], &dqcoeff_ptr[rc], 0))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_32x32_fp_nuq_c(const tran_low_t *coeff_ptr,
+                                    intptr_t n_coeffs, int skip_block,
+                                    const int16_t *quant_ptr,
+                                    const int16_t *dequant_ptr,
+                                    const cuml_bins_type_nuq *cuml_bins_ptr,
+                                    const dequant_val_type_nuq *dequant_val,
+                                    tran_low_t *qcoeff_ptr,
+                                    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                    const int16_t *scan, const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (highbd_quantize_coeff_bigtx_fp_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], dequant_ptr[rc != 0],
+              cuml_bins_ptr[band[i]], dequant_val[band[i]], &qcoeff_ptr[rc],
+              &dqcoeff_ptr[rc], 0))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_fp_nuq_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                              int skip_block, const int16_t *quant_ptr,
+                              const int16_t *dequant_ptr,
+                              const cuml_bins_type_nuq *cuml_bins_ptr,
+                              const dequant_val_type_nuq *dequant_val,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              uint16_t *eob_ptr, const int16_t *scan,
+                              const uint8_t *band) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    int i;
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      if (highbd_quantize_coeff_fp_nuq(
+              coeff_ptr[rc], quant_ptr[rc != 0], dequant_ptr[rc != 0],
+              cuml_bins_ptr[band[i]], dequant_val[band[i]], &qcoeff_ptr[rc],
+              &dqcoeff_ptr[rc]))
+        eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_dc_32x32_nuq(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t quant, const int16_t quant_shift, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (highbd_quantize_coeff_bigtx_nuq(coeff_ptr[rc], quant, quant_shift,
+                                        dequant, cuml_bins_ptr, dequant_val,
+                                        qcoeff_ptr, dqcoeff_ptr, 0))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+
+void highbd_quantize_dc_32x32_fp_nuq(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t quant, const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr) {
+  int eob = -1;
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+  if (!skip_block) {
+    const int rc = 0;
+    if (highbd_quantize_coeff_bigtx_fp_nuq(coeff_ptr[rc], quant, dequant,
+                                           cuml_bins_ptr, dequant_val,
+                                           qcoeff_ptr, dqcoeff_ptr, 0))
+      eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+#endif  // CONFIG_NEW_QUANT
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                        int skip_block, const int16_t *zbin_ptr,
+                        const int16_t *round_ptr, const int16_t *quant_ptr,
+                        const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                        uint16_t *eob_ptr, const int16_t *scan,
+                        const int16_t *iscan
+#if CONFIG_AOM_QM
+                        ,
+                        const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+#endif
+                        ) {
+  int i, eob = -1;
+  // TODO(jingning): Decide whether these arguments are still needed once the
+  // quantization rework is completed.
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)iscan;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    // Quantization pass: quantize and dequantize each coefficient in scan
+    // order, tracking the last nonzero position as the eob.
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+#if CONFIG_AOM_QM
+      const qm_val_t wt = qm_ptr[rc];
+      const qm_val_t iwt = iqm_ptr[rc];
+      const int dequant =
+          (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >>
+          AOM_QM_BITS;
+#endif
+      const int coeff_sign = (coeff >> 31);
+      const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+
+      int64_t tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX);
+      int tmp32;
+#if CONFIG_AOM_QM
+      tmp32 = (int)((tmp * wt * quant_ptr[rc != 0]) >> (16 + AOM_QM_BITS));
+      qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant;
+#else
+      tmp32 = (int)((tmp * quant_ptr[rc != 0]) >> 16);
+      qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
+#endif
+
+      if (tmp32) eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
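+
+/* Worked example (illustrative numbers, not taken from the real tables):
+ * with an AC dequant step of 32, quant_ptr[1] is about (1 << 16) / 32 = 2048.
+ * A coefficient of 100 with round_ptr[1] = 16 gives
+ * tmp32 = ((100 + 16) * 2048) >> 16 = 3 and dqcoeff = 3 * 32 = 96, i.e.
+ * division by the step size with a reduced fp-style rounding offset. */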
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+                               int skip_block, const int16_t *zbin_ptr,
+                               const int16_t *round_ptr,
+                               const int16_t *quant_ptr,
+                               const int16_t *quant_shift_ptr,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                               const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_QM
+                               const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+#endif
+                               int log_scale) {
+  int i;
+  int eob = -1;
+  const int scale = 1 << log_scale;
+  const int shift = 16 - log_scale;
+  // TODO(jingning): Decide whether these arguments are still needed once the
+  // quantization rework is completed.
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)iscan;
+
+  memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    // Quantization pass: quantize and dequantize each coefficient in scan
+    // order, tracking the last nonzero position as the eob.
+    for (i = 0; i < count; i++) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+#if CONFIG_AOM_QM
+      const qm_val_t wt = qm_ptr[rc];
+      const qm_val_t iwt = iqm_ptr[rc];
+      const int dequant =
+          (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >>
+          AOM_QM_BITS;
+#endif
+      const int coeff_sign = (coeff >> 31);
+      const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+      const int64_t tmp = abs_coeff + round_ptr[rc != 0];
+#if CONFIG_AOM_QM
+      const uint32_t abs_qcoeff =
+          (uint32_t)((tmp * quant_ptr[rc != 0] * wt) >> (shift + AOM_QM_BITS));
+      qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant / scale;
+#else
+      const uint32_t abs_qcoeff =
+          (uint32_t)((tmp * quant_ptr[rc != 0]) >> shift);
+      qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+      dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / scale;
+#endif
+      if (abs_qcoeff) eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+// TODO(jingning) Refactor this file and combine functions with similar
+// operations.
+void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                              int skip_block, const int16_t *zbin_ptr,
+                              const int16_t *round_ptr,
+                              const int16_t *quant_ptr,
+                              const int16_t *quant_shift_ptr,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                              const int16_t *scan, const int16_t *iscan
+#if CONFIG_AOM_QM
+                              ,
+                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr
+#endif
+                              ) {
+  int i, eob = -1;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)iscan;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    for (i = 0; i < n_coeffs; i++) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+#if CONFIG_AOM_QM
+      const qm_val_t wt = qm_ptr[rc];
+      const qm_val_t iwt = iqm_ptr[rc];
+      const int dequant =
+          (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >>
+          AOM_QM_BITS;
+      int64_t tmp = 0;
+#endif
+      const int coeff_sign = (coeff >> 31);
+      int tmp32 = 0;
+      int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+
+#if CONFIG_AOM_QM
+      if (abs_coeff * wt >= (dequant_ptr[rc != 0] << (AOM_QM_BITS - 2))) {
+#else
+      if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) {
+#endif
+        abs_coeff += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+        abs_coeff = clamp(abs_coeff, INT16_MIN, INT16_MAX);
+#if CONFIG_AOM_QM
+        tmp = abs_coeff * wt;
+        tmp32 = (int)((tmp * quant_ptr[rc != 0]) >> (AOM_QM_BITS + 15));
+        qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+        dqcoeff_ptr[rc] = (qcoeff_ptr[rc] * dequant) / 2;
+#else
+        tmp32 = (abs_coeff * quant_ptr[rc != 0]) >> 15;
+        qcoeff_ptr[rc] = (tmp32 ^ coeff_sign) - coeff_sign;
+        dqcoeff_ptr[rc] = (qcoeff_ptr[rc] * dequant_ptr[rc != 0]) / 2;
+#endif
+      }
+
+      if (tmp32) eob = i;
+    }
+  }
+  *eob_ptr = eob + 1;
+}
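+
+/* Relative to vp10_quantize_fp_c above, the 32x32 variant adds a dead zone
+ * (coefficients below a quarter of the dequant step are dropped), halves the
+ * rounding term, and uses a 15-bit shift with a halved dequant to compensate
+ * for the different output scaling of the 32x32 forward transform. */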
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                              int skip_block, const int16_t *zbin_ptr,
+                              const int16_t *round_ptr,
+                              const int16_t *quant_ptr,
+                              const int16_t *quant_shift_ptr,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                              const int16_t *scan, const int16_t *iscan,
+#if CONFIG_AOM_QM
+                              const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr,
+#endif
+                              int log_scale) {
+  int i, non_zero_count = (int)n_coeffs, eob = -1;
+  int zbins[2] = { zbin_ptr[0], zbin_ptr[1] };
+  int round[2] = { round_ptr[0], round_ptr[1] };
+  int nzbins[2];
+  int scale = 1;
+  int shift = 16;
+  (void)iscan;
+
+  if (log_scale > 0) {
+    zbins[0] = ROUND_POWER_OF_TWO(zbin_ptr[0], log_scale);
+    zbins[1] = ROUND_POWER_OF_TWO(zbin_ptr[1], log_scale);
+    round[0] = ROUND_POWER_OF_TWO(round_ptr[0], log_scale);
+    round[1] = ROUND_POWER_OF_TWO(round_ptr[1], log_scale);
+    scale = 1 << log_scale;
+    shift = 16 - log_scale;
+  }
+
+  nzbins[0] = zbins[0] * -1;
+  nzbins[1] = zbins[1] * -1;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    // Pre-scan pass
+    for (i = (int)n_coeffs - 1; i >= 0; i--) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+
+      if (coeff < zbins[rc != 0] && coeff > nzbins[rc != 0])
+        non_zero_count--;
+      else
+        break;
+    }
+
+    // Quantization pass: all coefficients at scan index >= non_zero_count
+    // were found inside the zero bin by the pre-scan and are skipped.
+    for (i = 0; i < non_zero_count; i++) {
+      const int rc = scan[i];
+      const int coeff = coeff_ptr[rc];
+      const int coeff_sign = (coeff >> 31);
+      const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+#if CONFIG_AOM_QM
+      const qm_val_t wt = qm_ptr[rc];
+      const qm_val_t iwt = iqm_ptr[rc];
+      const int dequant =
+          (dequant_ptr[rc != 0] * iwt + (1 << (AOM_QM_BITS - 1))) >>
+          AOM_QM_BITS;
+      if (abs_coeff * wt >= (zbins[rc != 0] << AOM_QM_BITS)) {
+#else
+      if (abs_coeff >= zbins[rc != 0]) {
+#endif
+        const int64_t tmp1 = abs_coeff + round[rc != 0];
+        const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1;
+#if CONFIG_AOM_QM
+        const uint32_t abs_qcoeff = (uint32_t)(
+            (tmp2 * wt * quant_shift_ptr[rc != 0]) >> (AOM_QM_BITS + shift));
+        qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+        dqcoeff_ptr[rc] = (qcoeff_ptr[rc] * dequant) / scale;
+#else
+        const uint32_t abs_qcoeff =
+            (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> shift);
+        qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+        dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / scale;
+#endif  // CONFIG_AOM_QM
+        if (abs_qcoeff) eob = i;
+      }
+    }
+  }
+  *eob_ptr = eob + 1;
+}
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                             int skip_block, const int16_t *round_ptr,
+                             const int16_t quant, tran_low_t *qcoeff_ptr,
+                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                             uint16_t *eob_ptr, const int log_scale) {
+  int eob = -1;
+
+  memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
+  memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr));
+
+  if (!skip_block) {
+    const int coeff = coeff_ptr[0];
+    const int coeff_sign = (coeff >> 31);
+    const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
+    const int64_t tmp = abs_coeff + round_ptr[0];
+    const uint32_t abs_qcoeff = (uint32_t)((tmp * quant) >> (16 - log_scale));
+    qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
+    dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr / (1 << log_scale);
+    if (abs_qcoeff) eob = 0;
+  }
+  *eob_ptr = eob + 1;
+}
+#endif
+
+static void invert_quant(int16_t *quant, int16_t *shift, int d) {
+  unsigned t;
+  int l, m;
+  t = d;
+  for (l = 0; t > 1; l++) t >>= 1;
+  m = 1 + (1 << (16 + l)) / d;
+  *quant = (int16_t)(m - (1 << 16));
+  *shift = 1 << (16 - l);
+}
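+
+/* Worked example (illustrative): for d = 12 the loop yields l = 3, so
+ * m = 1 + (1 << 19) / 12 = 43691, *quant = 43691 - (1 << 16) and
+ * *shift = 1 << 13. Callers compute ((x * *quant) >> 16) + x, which equals
+ * (x * m) >> 16 ~= x * 8 / 12; the later multiply by *shift followed by
+ * >> 16 removes the remaining factor of 8, giving x / d overall. */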
+
+static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) {
+  const int quant = vp10_dc_quant(q, 0, bit_depth);
+#if CONFIG_VP9_HIGHBITDEPTH
+  switch (bit_depth) {
+    case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+    case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+    case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      return -1;
+  }
+#else
+  (void)bit_depth;
+  return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+#endif
+}
+
+void vp10_init_quantizer(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  QUANTS *const quants = &cpi->quants;
+  int i, q, quant;
+#if CONFIG_NEW_QUANT
+  int dq;
+#endif
+
+  for (q = 0; q < QINDEX_RANGE; q++) {
+    const int qzbin_factor = get_qzbin_factor(q, cm->bit_depth);
+    const int qrounding_factor = q == 0 ? 64 : 48;
+
+    for (i = 0; i < 2; ++i) {
+      int qrounding_factor_fp = 64;
+      // y
+      quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
+                     : vp10_ac_quant(q, 0, cm->bit_depth);
+      invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
+      quants->y_quant_fp[q][i] = (1 << 16) / quant;
+      quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
+      quants->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+      quants->y_round[q][i] = (qrounding_factor * quant) >> 7;
+      cpi->y_dequant[q][i] = quant;
+
+      // uv
+      quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
+                     : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
+      invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
+                   quant);
+      quants->uv_quant_fp[q][i] = (1 << 16) / quant;
+      quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
+      quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
+      quants->uv_round[q][i] = (qrounding_factor * quant) >> 7;
+      cpi->uv_dequant[q][i] = quant;
+    }
+
+#if CONFIG_NEW_QUANT
+    for (dq = 0; dq < QUANT_PROFILES; dq++) {
+      for (i = 0; i < COEF_BANDS; i++) {
+        const int quant = cpi->y_dequant[q][i != 0];
+        const int uvquant = cpi->uv_dequant[q][i != 0];
+        vp10_get_dequant_val_nuq(quant, q, i, cpi->y_dequant_val_nuq[dq][q][i],
+                                 quants->y_cuml_bins_nuq[dq][q][i], dq);
+        vp10_get_dequant_val_nuq(uvquant, q, i,
+                                 cpi->uv_dequant_val_nuq[dq][q][i],
+                                 quants->uv_cuml_bins_nuq[dq][q][i], dq);
+      }
+    }
+#endif  // CONFIG_NEW_QUANT
+
+    for (i = 2; i < 8; i++) {  // 8: SIMD width
+      quants->y_quant[q][i] = quants->y_quant[q][1];
+      quants->y_quant_fp[q][i] = quants->y_quant_fp[q][1];
+      quants->y_round_fp[q][i] = quants->y_round_fp[q][1];
+      quants->y_quant_shift[q][i] = quants->y_quant_shift[q][1];
+      quants->y_zbin[q][i] = quants->y_zbin[q][1];
+      quants->y_round[q][i] = quants->y_round[q][1];
+      cpi->y_dequant[q][i] = cpi->y_dequant[q][1];
+
+      quants->uv_quant[q][i] = quants->uv_quant[q][1];
+      quants->uv_quant_fp[q][i] = quants->uv_quant_fp[q][1];
+      quants->uv_round_fp[q][i] = quants->uv_round_fp[q][1];
+      quants->uv_quant_shift[q][i] = quants->uv_quant_shift[q][1];
+      quants->uv_zbin[q][i] = quants->uv_zbin[q][1];
+      quants->uv_round[q][i] = quants->uv_round[q][1];
+      cpi->uv_dequant[q][i] = cpi->uv_dequant[q][1];
+    }
+  }
+}
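+
+/* The per-qindex tables built above keep the DC entry in lane 0, the AC
+ * entry in lane 1, and copies of the AC entry in lanes 2-7, so SIMD code can
+ * load a full 8-lane vector without special-casing the DC position. The
+ * rounding factors (64, 48) are in 1/128th units, hence the >> 7 when
+ * scaling them by the quantizer step. */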
+
+void vp10_init_plane_quantizers(const VP10_COMP *cpi, MACROBLOCK *x,
+                                int segment_id) {
+  const VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const QUANTS *const quants = &cpi->quants;
+  const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
+  int i;
+#if CONFIG_AOM_QM
+  int minqm = cm->min_qmlevel;
+  int maxqm = cm->max_qmlevel;
+  // The quant matrix depends only on the base QP, so there is one set per
+  // frame; lossless segments fall back to the flat matrix.
+  const int lossless = xd->lossless[segment_id];
+  int qmlevel = (lossless || cm->using_qmatrix == 0)
+                    ? NUM_QM_LEVELS - 1
+                    : aom_get_qmlevel(cm->base_qindex, minqm, maxqm);
+#endif
+#if CONFIG_NEW_QUANT
+  int dq;
+#endif
+
+  // Y
+  x->plane[0].quant = quants->y_quant[qindex];
+  x->plane[0].quant_fp = quants->y_quant_fp[qindex];
+  x->plane[0].round_fp = quants->y_round_fp[qindex];
+  x->plane[0].quant_shift = quants->y_quant_shift[qindex];
+  x->plane[0].zbin = quants->y_zbin[qindex];
+  x->plane[0].round = quants->y_round[qindex];
+#if CONFIG_AOM_QM
+  memcpy(&xd->plane[0].seg_qmatrix[segment_id], cm->gqmatrix[qmlevel][0],
+         sizeof(cm->gqmatrix[qmlevel][0]));
+  memcpy(&xd->plane[0].seg_iqmatrix[segment_id], cm->giqmatrix[qmlevel][0],
+         sizeof(cm->giqmatrix[qmlevel][0]));
+#endif
+  xd->plane[0].dequant = cpi->y_dequant[qindex];
+#if CONFIG_NEW_QUANT
+  for (dq = 0; dq < QUANT_PROFILES; dq++) {
+    x->plane[0].cuml_bins_nuq[dq] = quants->y_cuml_bins_nuq[dq][qindex];
+    xd->plane[0].dequant_val_nuq[dq] = cpi->y_dequant_val_nuq[dq][qindex];
+  }
+#endif  // CONFIG_NEW_QUANT
+
+  x->plane[0].quant_thred[0] = x->plane[0].zbin[0] * x->plane[0].zbin[0];
+  x->plane[0].quant_thred[1] = x->plane[0].zbin[1] * x->plane[0].zbin[1];
+
+  // UV
+  for (i = 1; i < 3; i++) {
+    x->plane[i].quant = quants->uv_quant[qindex];
+    x->plane[i].quant_fp = quants->uv_quant_fp[qindex];
+    x->plane[i].round_fp = quants->uv_round_fp[qindex];
+    x->plane[i].quant_shift = quants->uv_quant_shift[qindex];
+    x->plane[i].zbin = quants->uv_zbin[qindex];
+    x->plane[i].round = quants->uv_round[qindex];
+#if CONFIG_AOM_QM
+    memcpy(&xd->plane[i].seg_qmatrix[segment_id], cm->gqmatrix[qmlevel][1],
+           sizeof(cm->gqmatrix[qmlevel][1]));
+    memcpy(&xd->plane[i].seg_iqmatrix[segment_id], cm->giqmatrix[qmlevel][1],
+           sizeof(cm->giqmatrix[qmlevel][1]));
+#endif
+    xd->plane[i].dequant = cpi->uv_dequant[qindex];
+#if CONFIG_NEW_QUANT
+    for (dq = 0; dq < QUANT_PROFILES; dq++) {
+      x->plane[i].cuml_bins_nuq[dq] = quants->uv_cuml_bins_nuq[dq][qindex];
+      xd->plane[i].dequant_val_nuq[dq] = cpi->uv_dequant_val_nuq[dq][qindex];
+    }
+#endif  // CONFIG_NEW_QUANT
+
+    x->plane[i].quant_thred[0] = x->plane[i].zbin[0] * x->plane[i].zbin[0];
+    x->plane[i].quant_thred[1] = x->plane[i].zbin[1] * x->plane[i].zbin[1];
+  }
+
+  x->skip_block = segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
+  x->q_index = qindex;
+
+  set_error_per_bit(x, rdmult);
+
+  vp10_initialize_me_consts(cpi, x, x->q_index);
+}
+
+void vp10_frame_init_quantizer(VP10_COMP *cpi) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  vp10_init_plane_quantizers(cpi, x, xd->mi[0]->mbmi.segment_id);
+}
+
+void vp10_set_quantizer(VP10_COMMON *cm, int q) {
+  // quantizer has to be reinitialized with vp10_init_quantizer() if any
+  // delta_q changes.
+  cm->base_qindex = q;
+  cm->y_dc_delta_q = 0;
+  cm->uv_dc_delta_q = 0;
+  cm->uv_ac_delta_q = 0;
+}
+
+// Table that converts the 0-63 quantizer values passed in from outside to
+// the qindex range used internally.
+static const int quantizer_to_qindex[] = {
+  0,   4,   8,   12,  16,  20,  24,  28,  32,  36,  40,  44,  48,
+  52,  56,  60,  64,  68,  72,  76,  80,  84,  88,  92,  96,  100,
+  104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
+  156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
+  208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
+};
+
+int vp10_quantizer_to_qindex(int quantizer) {
+  return quantizer_to_qindex[quantizer];
+}
+
+int vp10_qindex_to_quantizer(int qindex) {
+  int quantizer;
+
+  for (quantizer = 0; quantizer < 64; ++quantizer)
+    if (quantizer_to_qindex[quantizer] >= qindex) return quantizer;
+
+  return 63;
+}
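+
+/* Round-trip sketch: vp10_quantizer_to_qindex(10) == 40, and
+ * vp10_qindex_to_quantizer(40) == 10 since entry 10 is the first one >= 40.
+ * A qindex between entries maps up, e.g. vp10_qindex_to_quantizer(41) == 11
+ * because the first entry >= 41 is 44. */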
diff --git a/av1/encoder/quantize.h b/av1/encoder/quantize.h
new file mode 100644
index 0000000..6b1e739
--- /dev/null
+++ b/av1/encoder/quantize.h
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_QUANTIZE_H_
+#define VP10_ENCODER_QUANTIZE_H_
+
+#include "./vpx_config.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/scan.h"
+#include "av1/encoder/block.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct QUANT_PARAM { int log_scale; } QUANT_PARAM;
+
+typedef void (*VP10_QUANT_FACADE)(const tran_low_t *coeff_ptr,
+                                  intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                  tran_low_t *qcoeff_ptr,
+                                  const MACROBLOCKD_PLANE *pd,
+                                  tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                  const scan_order *sc,
+                                  const QUANT_PARAM *qparam);
+
+typedef struct {
+#if CONFIG_NEW_QUANT
+  DECLARE_ALIGNED(
+      16, tran_low_t,
+      y_cuml_bins_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS][NUQ_KNOTS]);
+  DECLARE_ALIGNED(
+      16, tran_low_t,
+      uv_cuml_bins_nuq[QUANT_PROFILES][QINDEX_RANGE][COEF_BANDS][NUQ_KNOTS]);
+#endif  // CONFIG_NEW_QUANT
+  // 0: dc, 1: ac, 2-7: ac repeated to fill the SIMD width of 8
+  DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]);
+
+  // TODO(jingning): The quantization rework is in progress; decide then
+  // whether to deprecate the current use of y_quant.
+  DECLARE_ALIGNED(16, int16_t, y_quant_fp[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_quant_fp[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, y_round_fp[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_round_fp[QINDEX_RANGE][8]);
+
+  DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]);
+  DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
+} QUANTS;
+
+struct VP10_COMP;
+struct VP10Common;
+
+void vp10_frame_init_quantizer(struct VP10_COMP *cpi);
+
+void vp10_init_plane_quantizers(const struct VP10_COMP *cpi, MACROBLOCK *x,
+                                int segment_id);
+
+void vp10_init_quantizer(struct VP10_COMP *cpi);
+
+void vp10_set_quantizer(struct VP10Common *cm, int q);
+
+int vp10_quantizer_to_qindex(int quantizer);
+
+int vp10_qindex_to_quantizer(int qindex);
+
+void vp10_quantize_skip(intptr_t n_coeffs, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+
+void vp10_quantize_fp_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                             const MACROBLOCKD_PLANE *pd,
+                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                             const scan_order *sc, const QUANT_PARAM *qparam);
+
+void vp10_quantize_b_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                            const MACROBLOCKD_PLANE *pd,
+                            tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                            const scan_order *sc, const QUANT_PARAM *qparam);
+
+void vp10_quantize_dc_facade(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                             const MACROBLOCK_PLANE *p, tran_low_t *qcoeff_ptr,
+                             const MACROBLOCKD_PLANE *pd,
+                             tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                             const scan_order *sc, const QUANT_PARAM *qparam);
+
+#if CONFIG_NEW_QUANT
+void quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                     int skip_block, const int16_t quant,
+                     const int16_t quant_shift, const int16_t dequant,
+                     const tran_low_t *cuml_bins_ptr,
+                     const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+                     tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+void quantize_dc_32x32_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                           int skip_block, const int16_t quant,
+                           const int16_t quant_shift, const int16_t dequant,
+                           const tran_low_t *cuml_bins_ptr,
+                           const tran_low_t *dequant_val,
+                           tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                           uint16_t *eob_ptr);
+void quantize_dc_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                        int skip_block, const int16_t quant,
+                        const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+                        const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+                        tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+void quantize_dc_32x32_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                              int skip_block, const int16_t quant,
+                              const int16_t dequant,
+                              const tran_low_t *cuml_bins_ptr,
+                              const tran_low_t *dequant_val,
+                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                              uint16_t *eob_ptr);
+#endif  // CONFIG_NEW_QUANT
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_quantize_fp_facade(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
+    const QUANT_PARAM *qparam);
+
+void vp10_highbd_quantize_b_facade(const tran_low_t *coeff_ptr,
+                                   intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+                                   tran_low_t *qcoeff_ptr,
+                                   const MACROBLOCKD_PLANE *pd,
+                                   tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr,
+                                   const scan_order *sc,
+                                   const QUANT_PARAM *qparam);
+
+void vp10_highbd_quantize_dc_facade(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const MACROBLOCK_PLANE *p,
+    tran_low_t *qcoeff_ptr, const MACROBLOCKD_PLANE *pd,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr, const scan_order *sc,
+    const QUANT_PARAM *qparam);
+
+void vp10_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
+                             int skip_block, const int16_t *round_ptr,
+                             const int16_t quant, tran_low_t *qcoeff_ptr,
+                             tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr,
+                             uint16_t *eob_ptr, const int log_scale);
+#if CONFIG_NEW_QUANT
+void highbd_quantize_dc_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            int skip_block, const int16_t quant,
+                            const int16_t quant_shift, const int16_t dequant,
+                            const tran_low_t *cuml_bins_ptr,
+                            const tran_low_t *dequant_val,
+                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                            uint16_t *eob_ptr);
+void highbd_quantize_dc_32x32_nuq(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t quant, const int16_t quant_shift, const int16_t dequant,
+    const tran_low_t *cuml_bins_ptr, const tran_low_t *dequant_val,
+    tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+void highbd_quantize_dc_fp_nuq(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                               int skip_block, const int16_t quant,
+                               const int16_t dequant,
+                               const tran_low_t *cuml_bins_ptr,
+                               const tran_low_t *dequant_val,
+                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                               uint16_t *eob_ptr);
+void highbd_quantize_dc_32x32_fp_nuq(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+    const int16_t quant, const int16_t dequant, const tran_low_t *cuml_bins_ptr,
+    const tran_low_t *dequant_val, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, uint16_t *eob_ptr);
+
+#endif  // CONFIG_NEW_QUANT
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_QUANTIZE_H_
diff --git a/av1/encoder/ransac.c b/av1/encoder/ransac.c
new file mode 100644
index 0000000..e925068
--- /dev/null
+++ b/av1/encoder/ransac.c
@@ -0,0 +1,940 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include <math.h>
+#include <time.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include "vp10/encoder/ransac.h"
+
+#define MAX_PARAMDIM 9
+#define MAX_MINPTS 4
+
+#define MAX_DEGENERATE_ITER 10
+#define MINPTS_MULTIPLIER 5
+
+// svdcmp
+// Adapted from Numerical Recipes in C
+
+static const double TINY_NEAR_ZERO = 1.0E-12;
+
+static inline double SIGN(double a, double b) {
+  return ((b) >= 0 ? fabs(a) : -fabs(a));
+}
+
+static inline double PYTHAG(double a, double b) {
+  double absa, absb, ct;
+  absa = fabs(a);
+  absb = fabs(b);
+
+  if (absa > absb) {
+    ct = absb / absa;
+    return absa * sqrt(1.0 + ct * ct);
+  } else {
+    if (absb == 0) return 0;  // Both inputs are zero; avoid computing 0/0.
+    ct = absa / absb;
+    return absb * sqrt(1.0 + ct * ct);
+  }
+}
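+
+/* PYTHAG returns sqrt(a^2 + b^2) while avoiding overflow and underflow by
+ * factoring out the larger magnitude: |a| * sqrt(1 + (b/a)^2) when |a| is
+ * larger, and symmetrically otherwise (with 0 when both inputs are 0). */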
+
+int IMIN(int a, int b) { return (((a) < (b)) ? (a) : (b)); }
+
+int IMAX(int a, int b) { return (((a) < (b)) ? (b) : (a)); }
+
+void MultiplyMat(double *m1, double *m2, double *res, const int M1,
+                 const int N1, const int N2) {
+  int timesInner = N1;
+  int timesRows = M1;
+  int timesCols = N2;
+  double sum;
+
+  int row, col, inner;
+  for (row = 0; row < timesRows; ++row) {
+    for (col = 0; col < timesCols; ++col) {
+      sum = 0;
+      for (inner = 0; inner < timesInner; ++inner)
+        sum += m1[row * N1 + inner] * m2[inner * N2 + col];
+      *(res++) = sum;
+    }
+  }
+}
+
+static int svdcmp_(double **u, int m, int n, double w[], double **v) {
+  const int max_its = 30;
+  int flag, i, its, j, jj, k, l, nm;
+  double anorm, c, f, g, h, s, scale, x, y, z;
+  double *rv1 = (double *)malloc(sizeof(*rv1) * (n + 1));
+  g = scale = anorm = 0.0;
+  for (i = 0; i < n; i++) {
+    l = i + 1;
+    rv1[i] = scale * g;
+    g = s = scale = 0.0;
+    if (i < m) {
+      for (k = i; k < m; k++) scale += fabs(u[k][i]);
+      if (scale) {
+        for (k = i; k < m; k++) {
+          u[k][i] /= scale;
+          s += u[k][i] * u[k][i];
+        }
+        f = u[i][i];
+        g = -SIGN(sqrt(s), f);
+        h = f * g - s;
+        u[i][i] = f - g;
+        for (j = l; j < n; j++) {
+          for (s = 0.0, k = i; k < m; k++) s += u[k][i] * u[k][j];
+          f = s / h;
+          for (k = i; k < m; k++) u[k][j] += f * u[k][i];
+        }
+        for (k = i; k < m; k++) u[k][i] *= scale;
+      }
+    }
+    w[i] = scale * g;
+    g = s = scale = 0.0;
+    if (i < m && i != n - 1) {
+      for (k = l; k < n; k++) scale += fabs(u[i][k]);
+      if (scale) {
+        for (k = l; k < n; k++) {
+          u[i][k] /= scale;
+          s += u[i][k] * u[i][k];
+        }
+        f = u[i][l];
+        g = -SIGN(sqrt(s), f);
+        h = f * g - s;
+        u[i][l] = f - g;
+        for (k = l; k < n; k++) rv1[k] = u[i][k] / h;
+        for (j = l; j < m; j++) {
+          for (s = 0.0, k = l; k < n; k++) s += u[j][k] * u[i][k];
+          for (k = l; k < n; k++) u[j][k] += s * rv1[k];
+        }
+        for (k = l; k < n; k++) u[i][k] *= scale;
+      }
+    }
+    anorm = fmax(anorm, (fabs(w[i]) + fabs(rv1[i])));
+  }
+
+  for (i = n - 1; i >= 0; i--) {
+    if (i < n - 1) {
+      if (g) {
+        for (j = l; j < n; j++) v[j][i] = (u[i][j] / u[i][l]) / g;
+        for (j = l; j < n; j++) {
+          for (s = 0.0, k = l; k < n; k++) s += u[i][k] * v[k][j];
+          for (k = l; k < n; k++) v[k][j] += s * v[k][i];
+        }
+      }
+      for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0;
+    }
+    v[i][i] = 1.0;
+    g = rv1[i];
+    l = i;
+  }
+
+  for (i = IMIN(m, n) - 1; i >= 0; i--) {
+    l = i + 1;
+    g = w[i];
+    for (j = l; j < n; j++) u[i][j] = 0.0;
+    if (g) {
+      g = 1.0 / g;
+      for (j = l; j < n; j++) {
+        for (s = 0.0, k = l; k < m; k++) s += u[k][i] * u[k][j];
+        f = (s / u[i][i]) * g;
+        for (k = i; k < m; k++) u[k][j] += f * u[k][i];
+      }
+      for (j = i; j < m; j++) u[j][i] *= g;
+    } else {
+      for (j = i; j < m; j++) u[j][i] = 0.0;
+    }
+    ++u[i][i];
+  }
+  for (k = n - 1; k >= 0; k--) {
+    for (its = 0; its < max_its; its++) {
+      flag = 1;
+      for (l = k; l >= 0; l--) {
+        nm = l - 1;
+        if ((double)(fabs(rv1[l]) + anorm) == anorm || nm < 0) {
+          flag = 0;
+          break;
+        }
+        if ((double)(fabs(w[nm]) + anorm) == anorm) break;
+      }
+      if (flag) {
+        c = 0.0;
+        s = 1.0;
+        for (i = l; i <= k; i++) {
+          f = s * rv1[i];
+          rv1[i] = c * rv1[i];
+          if ((double)(fabs(f) + anorm) == anorm) break;
+          g = w[i];
+          h = PYTHAG(f, g);
+          w[i] = h;
+          h = 1.0 / h;
+          c = g * h;
+          s = -f * h;
+          for (j = 0; j < m; j++) {
+            y = u[j][nm];
+            z = u[j][i];
+            u[j][nm] = y * c + z * s;
+            u[j][i] = z * c - y * s;
+          }
+        }
+      }
+      z = w[k];
+      if (l == k) {
+        if (z < 0.0) {
+          w[k] = -z;
+          for (j = 0; j < n; j++) v[j][k] = -v[j][k];
+        }
+        break;
+      }
+      if (its == max_its - 1) {
+        free(rv1);
+        return 1;
+      }
+      assert(k > 0);
+      x = w[l];
+      nm = k - 1;
+      y = w[nm];
+      g = rv1[nm];
+      h = rv1[k];
+      f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y);
+      g = PYTHAG(f, 1.0);
+      f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x;
+      c = s = 1.0;
+      for (j = l; j <= nm; j++) {
+        i = j + 1;
+        g = rv1[i];
+        y = w[i];
+        h = s * g;
+        g = c * g;
+        z = PYTHAG(f, h);
+        rv1[j] = z;
+        c = f / z;
+        s = h / z;
+        f = x * c + g * s;
+        g = g * c - x * s;
+        h = y * s;
+        y *= c;
+        for (jj = 0; jj < n; jj++) {
+          x = v[jj][j];
+          z = v[jj][i];
+          v[jj][j] = x * c + z * s;
+          v[jj][i] = z * c - x * s;
+        }
+        z = PYTHAG(f, h);
+        w[j] = z;
+        if (z) {
+          z = 1.0 / z;
+          c = f * z;
+          s = h * z;
+        }
+        f = c * g + s * y;
+        x = c * y - s * g;
+        for (jj = 0; jj < m; jj++) {
+          y = u[jj][j];
+          z = u[jj][i];
+          u[jj][j] = y * c + z * s;
+          u[jj][i] = z * c - y * s;
+        }
+      }
+      rv1[l] = 0.0;
+      rv1[k] = f;
+      w[k] = x;
+    }
+  }
+  free(rv1);
+  return 0;
+}
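+
+/* svdcmp_ follows the Golub-Reinsch algorithm as presented in Numerical
+ * Recipes: Householder reduction to bidiagonal form, then implicit-shift QR
+ * iterations on the bidiagonal matrix. It returns nonzero when an iteration
+ * fails to converge within max_its sweeps for some singular value. */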
+
+static int SVD(double *U, double *W, double *V, double *matx, int M, int N) {
+  // Assumes allocation for U is MxN
+  double **nrU, **nrV;
+  int problem, i;
+
+  nrU = (double **)malloc((M) * sizeof(*nrU));
+  nrV = (double **)malloc((N) * sizeof(*nrV));
+  problem = !(nrU && nrV);
+  if (!problem) {
+    for (i = 0; i < M; i++) {
+      nrU[i] = &U[i * N];
+    }
+    for (i = 0; i < N; i++) {
+      nrV[i] = &V[i * N];
+    }
+  }
+  if (problem) {
+    free(nrU);
+    free(nrV);
+    return 1;
+  }
+
+  /* copy from given matx into nrU */
+  for (i = 0; i < M; i++) {
+    memcpy(&(nrU[i][0]), matx + N * i, N * sizeof(*matx));
+  }
+
+  /* HERE IT IS: do SVD */
+  if (svdcmp_(nrU, M, N, W, nrV)) {
+    free(nrU);
+    free(nrV);
+    return 1;
+  }
+
+  /* free Numerical Recipes arrays */
+  free(nrU);
+  free(nrV);
+
+  return 0;
+}
+
+int PseudoInverse(double *inv, double *matx, const int M, const int N) {
+  double *U, *W, *V, ans;
+  int i, j, k;
+  U = (double *)malloc(M * N * sizeof(*matx));
+  W = (double *)malloc(N * sizeof(*matx));
+  V = (double *)malloc(N * N * sizeof(*matx));
+
+  if (!(U && W && V)) {
+    return 1;
+  }
+  if (SVD(U, W, V, matx, M, N)) {
+    return 1;
+  }
+  for (i = 0; i < N; i++) {
+    if (fabs(W[i]) < TINY_NEAR_ZERO) {
+      return 1;
+    }
+  }
+
+  for (i = 0; i < N; i++) {
+    for (j = 0; j < M; j++) {
+      ans = 0;
+      for (k = 0; k < N; k++) {
+        ans += V[k + N * i] * U[k + N * j] / W[k];
+      }
+      inv[j + M * i] = ans;
+    }
+  }
+  free(U);
+  free(W);
+  free(V);
+  return 0;
+}
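+
+/* PseudoInverse builds the Moore-Penrose inverse from the SVD
+ * matx = U * diag(W) * V^T as inv = V * diag(1/W) * U^T; the inner loop
+ * accumulates exactly V[k + N * i] * U[k + N * j] / W[k]. The fitting
+ * routines below use it to solve their over-determined least-squares
+ * systems. */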
+
+////////////////////////////////////////////////////////////////////////////////
+// ransac
+typedef int (*isDegenerateType)(double *p);
+typedef void (*normalizeType)(double *p, int np, double *T);
+typedef void (*denormalizeType)(double *H, double *T1, double *T2);
+typedef int (*findTransformationType)(int points, double *points1,
+                                      double *points2, double *H);
+
+static int get_rand_indices(int npoints, int minpts, int *indices) {
+  int i, j;
+  unsigned int seed = (unsigned int)npoints;
+  int ptr = rand_r(&seed) % npoints;
+  if (minpts > npoints) return 0;
+  indices[0] = ptr;
+  ptr = (ptr == npoints - 1 ? 0 : ptr + 1);
+  i = 1;
+  while (i < minpts) {
+    int index = rand_r(&seed) % npoints;
+    while (index) {
+      ptr = (ptr == npoints - 1 ? 0 : ptr + 1);
+      for (j = 0; j < i; ++j) {
+        if (indices[j] == ptr) break;
+      }
+      if (j == i) index--;
+    }
+    indices[i++] = ptr;
+  }
+  return 1;
+}
+
+int ransac_(double *matched_points, int npoints, int *number_of_inliers,
+            int *best_inlier_mask, double *bestH, const int minpts,
+            const int paramdim, isDegenerateType isDegenerate,
+            normalizeType normalize, denormalizeType denormalize,
+            findTransformationType findTransformation,
+            ProjectPointsType projectpoints, TransformationType type) {
+  static const double INLIER_THRESHOLD_NORMALIZED = 0.1;
+  static const double INLIER_THRESHOLD_UNNORMALIZED = 1.0;
+  static const double PROBABILITY_REQUIRED = 0.9;
+  static const double EPS = 1e-12;
+  static const int MIN_TRIALS = 20;
+
+  const double inlier_threshold =
+      (normalize && denormalize ? INLIER_THRESHOLD_NORMALIZED
+                                : INLIER_THRESHOLD_UNNORMALIZED);
+  int N = 10000, trial_count = 0;
+  int i;
+  int ret_val = 0;
+
+  int max_inliers = 0;
+  double best_variance = 0.0;
+  double H[MAX_PARAMDIM];
+  WarpedMotionParams wm;
+  double points1[2 * MAX_MINPTS];
+  double points2[2 * MAX_MINPTS];
+  int indices[MAX_MINPTS];
+
+  double *best_inlier_set1;
+  double *best_inlier_set2;
+  double *inlier_set1;
+  double *inlier_set2;
+  double *corners1;
+  int *corners1_int;
+  double *corners2;
+  int *image1_coord;
+  int *inlier_mask;
+
+  double *cnp1, *cnp2;
+  double T1[9], T2[9];
+
+  // Seed deterministically from npoints rather than from time(NULL), so that
+  // results are reproducible for a given sequence and easy to test.
+  srand(npoints);
+
+  *number_of_inliers = 0;
+  if (npoints < minpts * MINPTS_MULTIPLIER) {
+    printf("Cannot find motion with %d matches\n", npoints);
+    return 1;
+  }
+
+  memset(&wm, 0, sizeof(wm));
+  best_inlier_set1 = (double *)malloc(sizeof(*best_inlier_set1) * npoints * 2);
+  best_inlier_set2 = (double *)malloc(sizeof(*best_inlier_set2) * npoints * 2);
+  inlier_set1 = (double *)malloc(sizeof(*inlier_set1) * npoints * 2);
+  inlier_set2 = (double *)malloc(sizeof(*inlier_set2) * npoints * 2);
+  corners1 = (double *)malloc(sizeof(*corners1) * npoints * 2);
+  corners1_int = (int *)malloc(sizeof(*corners1_int) * npoints * 2);
+  corners2 = (double *)malloc(sizeof(*corners2) * npoints * 2);
+  image1_coord = (int *)malloc(sizeof(*image1_coord) * npoints * 2);
+  inlier_mask = (int *)malloc(sizeof(*inlier_mask) * npoints);
+
+  for (cnp1 = corners1, cnp2 = corners2, i = 0; i < npoints; ++i) {
+    *(cnp1++) = *(matched_points++);
+    *(cnp1++) = *(matched_points++);
+    *(cnp2++) = *(matched_points++);
+    *(cnp2++) = *(matched_points++);
+  }
+  matched_points -= 4 * npoints;
+
+  if (normalize && denormalize) {
+    normalize(corners1, npoints, T1);
+    normalize(corners2, npoints, T2);
+  }
+
+  while (N > trial_count) {
+    int num_inliers = 0;
+    double sum_distance = 0.0;
+    double sum_distance_squared = 0.0;
+
+    int degenerate = 1;
+    int num_degenerate_iter = 0;
+    while (degenerate) {
+      num_degenerate_iter++;
+      if (!get_rand_indices(npoints, minpts, indices)) {
+        ret_val = 1;
+        goto finish_ransac;
+      }
+      i = 0;
+      while (i < minpts) {
+        int index = indices[i];
+        // add to list
+        points1[i * 2] = corners1[index * 2];
+        points1[i * 2 + 1] = corners1[index * 2 + 1];
+        points2[i * 2] = corners2[index * 2];
+        points2[i * 2 + 1] = corners2[index * 2 + 1];
+        i++;
+      }
+      degenerate = isDegenerate(points1);
+      if (num_degenerate_iter > MAX_DEGENERATE_ITER) {
+        ret_val = 1;
+        goto finish_ransac;
+      }
+    }
+
+    if (findTransformation(minpts, points1, points2, H)) {
+      trial_count++;
+      continue;
+    }
+
+    for (i = 0; i < npoints; ++i) {
+      corners1_int[2 * i] = (int)corners1[i * 2];
+      corners1_int[2 * i + 1] = (int)corners1[i * 2 + 1];
+    }
+
+    vp10_integerize_model(H, type, &wm);
+    projectpoints(wm.wmmat, corners1_int, image1_coord, npoints, 2, 2, 0, 0);
+
+    for (i = 0; i < npoints; ++i) {
+      double dx =
+          (image1_coord[i * 2] >> WARPEDPIXEL_PREC_BITS) - corners2[i * 2];
+      double dy = (image1_coord[i * 2 + 1] >> WARPEDPIXEL_PREC_BITS) -
+                  corners2[i * 2 + 1];
+      double distance = sqrt(dx * dx + dy * dy);
+
+      inlier_mask[i] = distance < inlier_threshold;
+      if (inlier_mask[i]) {
+        inlier_set1[num_inliers * 2] = corners1[i * 2];
+        inlier_set1[num_inliers * 2 + 1] = corners1[i * 2 + 1];
+        inlier_set2[num_inliers * 2] = corners2[i * 2];
+        inlier_set2[num_inliers * 2 + 1] = corners2[i * 2 + 1];
+        num_inliers++;
+        sum_distance += distance;
+        sum_distance_squared += distance * distance;
+      }
+    }
+
+    if (num_inliers >= max_inliers) {
+      double mean_distance = sum_distance / ((double)num_inliers);
+      double variance = sum_distance_squared / ((double)num_inliers - 1.0) -
+                        mean_distance * mean_distance * ((double)num_inliers) /
+                            ((double)num_inliers - 1.0);
+      if ((num_inliers > max_inliers) ||
+          (num_inliers == max_inliers && variance < best_variance)) {
+        best_variance = variance;
+        max_inliers = num_inliers;
+        memcpy(bestH, H, paramdim * sizeof(*bestH));
+        memcpy(best_inlier_set1, inlier_set1,
+               num_inliers * 2 * sizeof(*best_inlier_set1));
+        memcpy(best_inlier_set2, inlier_set2,
+               num_inliers * 2 * sizeof(*best_inlier_set2));
+        memcpy(best_inlier_mask, inlier_mask,
+               npoints * sizeof(*best_inlier_mask));
+
+        if (num_inliers > 0) {
+          double fracinliers = (double)num_inliers / (double)npoints;
+          double pNoOutliers = 1 - pow(fracinliers, minpts);
+          int temp;
+          pNoOutliers = fmax(EPS, pNoOutliers);
+          pNoOutliers = fmin(1 - EPS, pNoOutliers);
+          temp = (int)(log(1.0 - PROBABILITY_REQUIRED) / log(pNoOutliers));
+          if (temp > 0 && temp < N) {
+            N = IMAX(temp, MIN_TRIALS);
+          }
+        }
+      }
+    }
+    trial_count++;
+  }
+  findTransformation(max_inliers, best_inlier_set1, best_inlier_set2, bestH);
+  if (normalize && denormalize) {
+    denormalize(bestH, T1, T2);
+  }
+  *number_of_inliers = max_inliers;
+finish_ransac:
+  free(best_inlier_set1);
+  free(best_inlier_set2);
+  free(inlier_set1);
+  free(inlier_set2);
+  free(corners1);
+  free(corners1_int);
+  free(corners2);
+  free(image1_coord);
+  free(inlier_mask);
+  return ret_val;
+}
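+
+/* The in-loop update of N is the standard adaptive RANSAC trial count: if a
+ * fraction w of the matches are inliers, a random sample of minpts points is
+ * all-inlier with probability w^minpts, so
+ * N = log(1 - p) / log(1 - w^minpts) trials suffice to draw at least one
+ * clean sample with probability p (PROBABILITY_REQUIRED, 0.9 here). */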
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void normalizeHomography(double *pts, int n, double *T) {
+  // Assume the points are 2d coordinates with scale = 1
+  double *p = pts;
+  double mean[2] = { 0, 0 };
+  double msqe = 0;
+  double scale;
+  int i;
+  for (i = 0; i < n; ++i, p += 2) {
+    mean[0] += p[0];
+    mean[1] += p[1];
+  }
+  mean[0] /= n;
+  mean[1] /= n;
+  for (p = pts, i = 0; i < n; ++i, p += 2) {
+    p[0] -= mean[0];
+    p[1] -= mean[1];
+    msqe += sqrt(p[0] * p[0] + p[1] * p[1]);
+  }
+  msqe /= n;
+  scale = sqrt(2) / msqe;
+  T[0] = scale;
+  T[1] = 0;
+  T[2] = -scale * mean[0];
+  T[3] = 0;
+  T[4] = scale;
+  T[5] = -scale * mean[1];
+  T[6] = 0;
+  T[7] = 0;
+  T[8] = 1;
+  for (p = pts, i = 0; i < n; ++i, p += 2) {
+    p[0] *= scale;
+    p[1] *= scale;
+  }
+}
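+
+/* This is Hartley-style normalization: points are translated so their
+ * centroid sits at the origin and scaled so the mean distance from the
+ * origin is sqrt(2); T receives the 3x3 matrix of that transform. The
+ * conditioning makes the least-squares fits below far less sensitive to
+ * numerical error. */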
+
+static void invnormalize_mat(double *T, double *iT) {
+  double is = 1.0 / T[0];
+  double m0 = -T[2] * is;
+  double m1 = -T[5] * is;
+  iT[0] = is;
+  iT[1] = 0;
+  iT[2] = m0;
+  iT[3] = 0;
+  iT[4] = is;
+  iT[5] = m1;
+  iT[6] = 0;
+  iT[7] = 0;
+  iT[8] = 1;
+}
+
+static void denormalizeHomography(double *H, double *T1, double *T2) {
+  double iT2[9];
+  double H2[9];
+  invnormalize_mat(T2, iT2);
+  MultiplyMat(H, T1, H2, 3, 3, 3);
+  MultiplyMat(iT2, H2, H, 3, 3, 3);
+}
+
+static void denormalizeAffine(double *H, double *T1, double *T2) {
+  double Ha[MAX_PARAMDIM];
+  Ha[0] = H[0];
+  Ha[1] = H[1];
+  Ha[2] = H[4];
+  Ha[3] = H[2];
+  Ha[4] = H[3];
+  Ha[5] = H[5];
+  Ha[6] = Ha[7] = 0;
+  Ha[8] = 1;
+  denormalizeHomography(Ha, T1, T2);
+  H[0] = Ha[2];
+  H[1] = Ha[5];
+  H[2] = Ha[0];
+  H[3] = Ha[1];
+  H[4] = Ha[3];
+  H[5] = Ha[4];
+}
+
+static void denormalizeRotZoom(double *H, double *T1, double *T2) {
+  double Ha[MAX_PARAMDIM];
+  Ha[0] = H[0];
+  Ha[1] = H[1];
+  Ha[2] = H[2];
+  Ha[3] = -H[1];
+  Ha[4] = H[0];
+  Ha[5] = H[3];
+  Ha[6] = Ha[7] = 0;
+  Ha[8] = 1;
+  denormalizeHomography(Ha, T1, T2);
+  H[0] = Ha[2];
+  H[1] = Ha[5];
+  H[2] = Ha[0];
+  H[3] = Ha[1];
+}
+
+static void denormalizeTranslation(double *H, double *T1, double *T2) {
+  double Ha[MAX_PARAMDIM];
+  Ha[0] = 1;
+  Ha[1] = 0;
+  Ha[2] = H[0];
+  Ha[3] = 0;
+  Ha[4] = 1;
+  Ha[5] = H[1];
+  Ha[6] = Ha[7] = 0;
+  Ha[8] = 1;
+  denormalizeHomography(Ha, T1, T2);
+  H[0] = Ha[2];
+  H[1] = Ha[5];
+}
+
+static int is_collinear3(double *p1, double *p2, double *p3) {
+  static const double collinear_eps = 1e-3;
+  const double v =
+      (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]);
+  return fabs(v) < collinear_eps;
+}
+
+static int isDegenerateTranslation(double *p) {
+  return (p[0] - p[2]) * (p[0] - p[2]) + (p[1] - p[3]) * (p[1] - p[3]) <= 2;
+}
+
+static int isDegenerateAffine(double *p) {
+  return is_collinear3(p, p + 2, p + 4);
+}
+
+static int isDegenerateHomography(double *p) {
+  return is_collinear3(p, p + 2, p + 4) || is_collinear3(p, p + 2, p + 6) ||
+         is_collinear3(p, p + 4, p + 6) || is_collinear3(p + 2, p + 4, p + 6);
+}
+
+int findTranslation(const int np, double *pts1, double *pts2, double *mat) {
+  int i;
+  double sx, sy, dx, dy;
+  double sumx, sumy;
+
+  double T1[9], T2[9];
+  normalizeHomography(pts1, np, T1);
+  normalizeHomography(pts2, np, T2);
+
+  sumx = 0;
+  sumy = 0;
+  for (i = 0; i < np; ++i) {
+    dx = *(pts2++);
+    dy = *(pts2++);
+    sx = *(pts1++);
+    sy = *(pts1++);
+
+    sumx += dx - sx;
+    sumy += dy - sy;
+  }
+  mat[0] = sumx / np;
+  mat[1] = sumy / np;
+  denormalizeTranslation(mat, T1, T2);
+  return 0;
+}
+
+int findRotZoom(const int np, double *pts1, double *pts2, double *mat) {
+  const int np2 = np * 2;
+  double *a = (double *)malloc(sizeof(*a) * np2 * 9);
+  double *b = a + np2 * 4;
+  double *temp = b + np2;
+  int i;
+  double sx, sy, dx, dy;
+
+  double T1[9], T2[9];
+  normalizeHomography(pts1, np, T1);
+  normalizeHomography(pts2, np, T2);
+
+  for (i = 0; i < np; ++i) {
+    dx = *(pts2++);
+    dy = *(pts2++);
+    sx = *(pts1++);
+    sy = *(pts1++);
+
+    a[i * 2 * 4 + 0] = sx;
+    a[i * 2 * 4 + 1] = sy;
+    a[i * 2 * 4 + 2] = 1;
+    a[i * 2 * 4 + 3] = 0;
+    a[(i * 2 + 1) * 4 + 0] = sy;
+    a[(i * 2 + 1) * 4 + 1] = -sx;
+    a[(i * 2 + 1) * 4 + 2] = 0;
+    a[(i * 2 + 1) * 4 + 3] = 1;
+
+    b[2 * i] = dx;
+    b[2 * i + 1] = dy;
+  }
+  if (PseudoInverse(temp, a, np2, 4)) {
+    free(a);
+    return 1;
+  }
+  MultiplyMat(temp, b, mat, 4, np2, 1);
+  denormalizeRotZoom(mat, T1, T2);
+  free(a);
+  return 0;
+}
+
+int findAffine(const int np, double *pts1, double *pts2, double *mat) {
+  const int np2 = np * 2;
+  double *a = (double *)malloc(sizeof(*a) * np2 * 13);
+  double *b = a + np2 * 6;
+  double *temp = b + np2;
+  int i;
+  double sx, sy, dx, dy;
+
+  double T1[9], T2[9];
+  normalizeHomography(pts1, np, T1);
+  normalizeHomography(pts2, np, T2);
+
+  for (i = 0; i < np; ++i) {
+    dx = *(pts2++);
+    dy = *(pts2++);
+    sx = *(pts1++);
+    sy = *(pts1++);
+
+    a[i * 2 * 6 + 0] = sx;
+    a[i * 2 * 6 + 1] = sy;
+    a[i * 2 * 6 + 2] = 0;
+    a[i * 2 * 6 + 3] = 0;
+    a[i * 2 * 6 + 4] = 1;
+    a[i * 2 * 6 + 5] = 0;
+    a[(i * 2 + 1) * 6 + 0] = 0;
+    a[(i * 2 + 1) * 6 + 1] = 0;
+    a[(i * 2 + 1) * 6 + 2] = sx;
+    a[(i * 2 + 1) * 6 + 3] = sy;
+    a[(i * 2 + 1) * 6 + 4] = 0;
+    a[(i * 2 + 1) * 6 + 5] = 1;
+
+    b[2 * i] = dx;
+    b[2 * i + 1] = dy;
+  }
+  if (PseudoInverse(temp, a, np2, 6)) {
+    free(a);
+    return 1;
+  }
+  MultiplyMat(temp, b, mat, 6, np2, 1);
+  denormalizeAffine(mat, T1, T2);
+  free(a);
+  return 0;
+}
+
+int findHomography(const int np, double *pts1, double *pts2, double *mat) {
+  // Implemented from Peter Kovesi's normalized implementation
+  const int np3 = np * 3;
+  double *a = (double *)malloc(sizeof(*a) * np3 * 18);
+  double *U = a + np3 * 9;
+  double S[9], V[9 * 9];
+  int i, mini;
+  double sx, sy, dx, dy;
+  double T1[9], T2[9];
+
+  normalizeHomography(pts1, np, T1);
+  normalizeHomography(pts2, np, T2);
+
+  for (i = 0; i < np; ++i) {
+    dx = *(pts2++);
+    dy = *(pts2++);
+    sx = *(pts1++);
+    sy = *(pts1++);
+
+    a[i * 3 * 9 + 0] = a[i * 3 * 9 + 1] = a[i * 3 * 9 + 2] = 0;
+    a[i * 3 * 9 + 3] = -sx;
+    a[i * 3 * 9 + 4] = -sy;
+    a[i * 3 * 9 + 5] = -1;
+    a[i * 3 * 9 + 6] = dy * sx;
+    a[i * 3 * 9 + 7] = dy * sy;
+    a[i * 3 * 9 + 8] = dy;
+
+    a[(i * 3 + 1) * 9 + 0] = sx;
+    a[(i * 3 + 1) * 9 + 1] = sy;
+    a[(i * 3 + 1) * 9 + 2] = 1;
+    a[(i * 3 + 1) * 9 + 3] = a[(i * 3 + 1) * 9 + 4] = a[(i * 3 + 1) * 9 + 5] =
+        0;
+    a[(i * 3 + 1) * 9 + 6] = -dx * sx;
+    a[(i * 3 + 1) * 9 + 7] = -dx * sy;
+    a[(i * 3 + 1) * 9 + 8] = -dx;
+
+    a[(i * 3 + 2) * 9 + 0] = -dy * sx;
+    a[(i * 3 + 2) * 9 + 1] = -dy * sy;
+    a[(i * 3 + 2) * 9 + 2] = -dy;
+    a[(i * 3 + 2) * 9 + 3] = dx * sx;
+    a[(i * 3 + 2) * 9 + 4] = dx * sy;
+    a[(i * 3 + 2) * 9 + 5] = dx;
+    a[(i * 3 + 2) * 9 + 6] = a[(i * 3 + 2) * 9 + 7] = a[(i * 3 + 2) * 9 + 8] =
+        0;
+  }
+
+  if (SVD(U, S, V, a, np3, 9)) {
+    free(a);
+    return 1;
+  } else {
+    double minS = 1e12;
+    mini = -1;
+    for (i = 0; i < 9; ++i) {
+      if (S[i] < minS) {
+        minS = S[i];
+        mini = i;
+      }
+    }
+  }
+
+  for (i = 0; i < 9; i++) mat[i] = V[i * 9 + mini];
+  denormalizeHomography(mat, T1, T2);
+  free(a);
+  if (mat[8] == 0.0) {
+    return 1;
+  }
+  return 0;
+}
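+
+/* findHomography is the normalized direct linear transform (DLT): each
+ * correspondence contributes three rows of the homogeneous system A h = 0,
+ * and the solution is the right singular vector of A for the smallest
+ * singular value, i.e. the column of V picked out by mini above. */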
+
+int findHomographyScale1(const int np, double *pts1, double *pts2,
+                         double *mat) {
+  // This implementation assumes h33 = 1, but does not seem to give good results
+  const int np2 = np * 2;
+  double *a = (double *)malloc(sizeof(*a) * np2 * 17);
+  double *b = a + np2 * 8;
+  double *temp = b + np2;
+  int i, j;
+  double sx, sy, dx, dy;
+  double T1[9], T2[9];
+
+  normalizeHomography(pts1, np, T1);
+  normalizeHomography(pts2, np, T2);
+
+  for (i = 0, j = np; i < np; ++i, ++j) {
+    dx = *(pts2++);
+    dy = *(pts2++);
+    sx = *(pts1++);
+    sy = *(pts1++);
+    a[i * 8 + 0] = a[j * 8 + 3] = sx;
+    a[i * 8 + 1] = a[j * 8 + 4] = sy;
+    a[i * 8 + 2] = a[j * 8 + 5] = 1;
+    a[i * 8 + 3] = a[i * 8 + 4] = a[i * 8 + 5] = a[j * 8 + 0] = a[j * 8 + 1] =
+        a[j * 8 + 2] = 0;
+    a[i * 8 + 6] = -dx * sx;
+    a[i * 8 + 7] = -dx * sy;
+    a[j * 8 + 6] = -dy * sx;
+    a[j * 8 + 7] = -dy * sy;
+    b[i] = dx;
+    b[j] = dy;
+  }
+
+  if (PseudoInverse(temp, a, np2, 8)) {
+    free(a);
+    return 1;
+  }
+  MultiplyMat(temp, b, &*mat, 8, np2, 1);
+  mat[8] = 1;
+
+  denormalizeHomography(mat, T1, T2);
+  free(a);
+  return 0;
+}
+
+int ransacTranslation(double *matched_points, int npoints,
+                      int *number_of_inliers, int *best_inlier_mask,
+                      double *bestH) {
+  return ransac_(matched_points, npoints, number_of_inliers, best_inlier_mask,
+                 bestH, 3, 2, isDegenerateTranslation,
+                 NULL,  // normalizeHomography,
+                 NULL,  // denormalizeRotZoom,
+                 findTranslation, projectPointsTranslation, TRANSLATION);
+}
+
+int ransacRotZoom(double *matched_points, int npoints, int *number_of_inliers,
+                  int *best_inlier_mask, double *bestH) {
+  return ransac_(matched_points, npoints, number_of_inliers, best_inlier_mask,
+                 bestH, 3, 4, isDegenerateAffine,
+                 NULL,  // normalizeHomography,
+                 NULL,  // denormalizeRotZoom,
+                 findRotZoom, projectPointsRotZoom, ROTZOOM);
+}
+
+int ransacAffine(double *matched_points, int npoints, int *number_of_inliers,
+                 int *best_inlier_mask, double *bestH) {
+  return ransac_(matched_points, npoints, number_of_inliers, best_inlier_mask,
+                 bestH, 3, 6, isDegenerateAffine,
+                 NULL,  // normalizeHomography,
+                 NULL,  // denormalizeAffine,
+                 findAffine, projectPointsAffine, AFFINE);
+}
+
+int ransacHomography(double *matched_points, int npoints,
+                     int *number_of_inliers, int *best_inlier_mask,
+                     double *bestH) {
+  int result = ransac_(matched_points, npoints, number_of_inliers,
+                       best_inlier_mask, bestH, 4, 8, isDegenerateHomography,
+                       NULL,  // normalizeHomography,
+                       NULL,  // denormalizeHomography,
+                       findHomography, projectPointsHomography, HOMOGRAPHY);
+  if (!result) {
+    // normalize so that H33 = 1
+    int i;
+    double m = 1.0 / bestH[8];
+    for (i = 0; i < 8; ++i) bestH[i] *= m;
+    bestH[8] = 1.0;
+  }
+  return result;
+}
diff --git a/av1/encoder/ransac.h b/av1/encoder/ransac.h
new file mode 100644
index 0000000..0b14ecf
--- /dev/null
+++ b/av1/encoder/ransac.h
@@ -0,0 +1,37 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_RANSAC_H_
+#define VP10_ENCODER_RANSAC_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+
+#include "vp10/common/warped_motion.h"
+
+typedef int (*RansacType)(double *matched_points, int npoints,
+                          int *number_of_inliers, int *best_inlier_mask,
+                          double *bestH);
+
+/* Each of these functions fits a motion model from a set of corresponding
+   points in two frames using RANSAC. */
+int ransacHomography(double *matched_points, int npoints,
+                     int *number_of_inliers, int *best_inlier_indices,
+                     double *bestH);
+int ransacAffine(double *matched_points, int npoints, int *number_of_inliers,
+                 int *best_inlier_indices, double *bestH);
+int ransacRotZoom(double *matched_points, int npoints, int *number_of_inliers,
+                  int *best_inlier_indices, double *bestH);
+int ransacTranslation(double *matched_points, int npoints,
+                      int *number_of_inliers, int *best_inlier_indices,
+                      double *bestH);
+#endif  // VP10_ENCODER_RANSAC_H_
diff --git a/av1/encoder/ratectrl.c b/av1/encoder/ratectrl.c
new file mode 100644
index 0000000..ddd5762
--- /dev/null
+++ b/av1/encoder/ratectrl.c
@@ -0,0 +1,1757 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+
+#include "av1/common/alloccommon.h"
+#include "av1/encoder/aq_cyclicrefresh.h"
+#include "av1/common/common.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/ratectrl.h"
+
+// Max rate target for 1080P and below encodes under normal circumstances
+// (1920 * 1080 / (16 * 16)) * MAX_MB_RATE bits per MB
+#define MAX_MB_RATE 250
+#define MAXRATE_1080P 2025000
+
+#define DEFAULT_KF_BOOST 2000
+#define DEFAULT_GF_BOOST 2000
+
+#define MIN_BPB_FACTOR 0.005
+#define MAX_BPB_FACTOR 50
+
+#define FRAME_OVERHEAD_BITS 200
+#if CONFIG_VP9_HIGHBITDEPTH
+#define ASSIGN_MINQ_TABLE(bit_depth, name)                   \
+  do {                                                       \
+    switch (bit_depth) {                                     \
+      case VPX_BITS_8: name = name##_8; break;               \
+      case VPX_BITS_10: name = name##_10; break;             \
+      case VPX_BITS_12: name = name##_12; break;             \
+      default:                                               \
+        assert(0 &&                                          \
+               "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
+               " or VPX_BITS_12");                           \
+        name = NULL;                                         \
+    }                                                        \
+  } while (0)
+#else
+#define ASSIGN_MINQ_TABLE(bit_depth, name) \
+  do {                                     \
+    (void) bit_depth;                      \
+    name = name##_8;                       \
+  } while (0)
+#endif
+
+// Tables relating active max Q to active min Q
+static int kf_low_motion_minq_8[QINDEX_RANGE];
+static int kf_high_motion_minq_8[QINDEX_RANGE];
+static int arfgf_low_motion_minq_8[QINDEX_RANGE];
+static int arfgf_high_motion_minq_8[QINDEX_RANGE];
+static int inter_minq_8[QINDEX_RANGE];
+static int rtc_minq_8[QINDEX_RANGE];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static int kf_low_motion_minq_10[QINDEX_RANGE];
+static int kf_high_motion_minq_10[QINDEX_RANGE];
+static int arfgf_low_motion_minq_10[QINDEX_RANGE];
+static int arfgf_high_motion_minq_10[QINDEX_RANGE];
+static int inter_minq_10[QINDEX_RANGE];
+static int rtc_minq_10[QINDEX_RANGE];
+static int kf_low_motion_minq_12[QINDEX_RANGE];
+static int kf_high_motion_minq_12[QINDEX_RANGE];
+static int arfgf_low_motion_minq_12[QINDEX_RANGE];
+static int arfgf_high_motion_minq_12[QINDEX_RANGE];
+static int inter_minq_12[QINDEX_RANGE];
+static int rtc_minq_12[QINDEX_RANGE];
+#endif
+
+static int gf_high = 2000;
+static int gf_low = 400;
+static int kf_high = 5000;
+static int kf_low = 400;
+
+// Functions to compute the active minq lookup table entries based on a
+// formulaic approach, to make the Q tables easier to adjust. The formulae
+// were derived by computing a 3rd order polynomial best fit to the original
+// data (after plotting real maxq vs minq (not q index)).
+static int get_minq_index(double maxq, double x3, double x2, double x1,
+                          vpx_bit_depth_t bit_depth) {
+  int i;
+  const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq);
+
+  // Special case handling to deal with the step from q2.0
+  // down to lossless mode represented by q 1.0.
+  if (minqtarget <= 2.0) return 0;
+
+  for (i = 0; i < QINDEX_RANGE; i++) {
+    if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
+  }
+
+  return QINDEX_RANGE - 1;
+}
+
+static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
+                           int *arfgf_high, int *inter, int *rtc,
+                           vpx_bit_depth_t bit_depth) {
+  int i;
+  for (i = 0; i < QINDEX_RANGE; i++) {
+    const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
+    kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth);
+    kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
+    arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth);
+    arfgf_high[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
+    inter[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.90, bit_depth);
+    rtc[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.70, bit_depth);
+  }
+}
+
+void vp10_rc_init_minq_luts(void) {
+  init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8,
+                 arfgf_low_motion_minq_8, arfgf_high_motion_minq_8,
+                 inter_minq_8, rtc_minq_8, VPX_BITS_8);
+#if CONFIG_VP9_HIGHBITDEPTH
+  init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10,
+                 arfgf_low_motion_minq_10, arfgf_high_motion_minq_10,
+                 inter_minq_10, rtc_minq_10, VPX_BITS_10);
+  init_minq_luts(kf_low_motion_minq_12, kf_high_motion_minq_12,
+                 arfgf_low_motion_minq_12, arfgf_high_motion_minq_12,
+                 inter_minq_12, rtc_minq_12, VPX_BITS_12);
+#endif
+}
+
+// These functions use formulaic calculations to make playing with the
+// quantizer tables easier. They can be replaced by lookup tables if and
+// when things settle down in the experimental bitstream.
+double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
+// Convert the index to a real Q value (scaled down to match old Q values)
+#if CONFIG_VP9_HIGHBITDEPTH
+  switch (bit_depth) {
+    case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+    case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
+    case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      return -1.0;
+  }
+#else
+  return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+#endif
+}
+
+int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+                        double correction_factor, vpx_bit_depth_t bit_depth) {
+  const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
+  int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
+
+  assert(correction_factor <= MAX_BPB_FACTOR &&
+         correction_factor >= MIN_BPB_FACTOR);
+
+  // Q-based adjustment to the baseline enumerator.
+  enumerator += (int)(enumerator * q) >> 12;
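+  // E.g. an inter frame at real Q = 50 with correction_factor = 1.0 gives
+  // (1800000 + ((1800000 * 50) >> 12)) / 50 ~= 36439; this per-MB value is
+  // still scaled up by 2^BPER_MB_NORMBITS (see vp10_estimate_bits_at_q).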
+  return (int)(enumerator * correction_factor / q);
+}
+
+int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
+                            double correction_factor,
+                            vpx_bit_depth_t bit_depth) {
+  const int bpm =
+      (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
+  return VPXMAX(FRAME_OVERHEAD_BITS,
+                (int)(((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS));
+}
+
+int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
+  const RATE_CONTROL *rc = &cpi->rc;
+  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const int min_frame_target =
+      VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+// Clip the frame target to the configured minimum value.
+#if CONFIG_EXT_REFS
+  if (cpi->rc.is_src_frame_alt_ref) {
+#else
+  if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) {
+#endif
+    // If there is an active ARF at this location, use the minimum
+    // bits on this frame even if it is a constructed arf.
+    // The active maximum quantizer ensures that an appropriate
+    // number of bits will be spent if needed for constructed ARFs.
+    target = min_frame_target;
+  } else if (target < min_frame_target) {
+    target = min_frame_target;
+  }
+
+  // Clip the frame target to the maximum allowed value.
+  if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
+  if (oxcf->rc_max_inter_bitrate_pct) {
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
+    target = VPXMIN(target, max_rate);
+  }
+
+  return target;
+}
+
+int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
+  const RATE_CONTROL *rc = &cpi->rc;
+  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  if (oxcf->rc_max_intra_bitrate_pct) {
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
+    target = VPXMIN(target, max_rate);
+  }
+  if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
+  return target;
+}
+
+// Update the buffer level: leaky bucket model.
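+// For a shown frame the bucket refills by one average frame's worth of bits
+// and drains by the frame's actual size; no-show frames only drain. E.g.
+// with avg_frame_bandwidth = 40000 bits, a 50000-bit frame moves
+// bits_off_target down by 10000.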
+static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
+  const VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+
+// Non-viewable frames are a special case and are treated as pure overhead.
+#if CONFIG_EXT_REFS
+  // TODO(zoeliu): To further explore whether we should treat BWDREF_FRAME
+  //               differently, since it is a no-show frame.
+  if (!cm->show_frame && !rc->is_bwd_ref_frame)
+#else
+  if (!cm->show_frame)
+#endif  // CONFIG_EXT_REFS
+    rc->bits_off_target -= encoded_frame_size;
+  else
+    rc->bits_off_target += rc->avg_frame_bandwidth - encoded_frame_size;
+
+  // Clip the buffer level to the maximum specified buffer size.
+  rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size);
+  rc->buffer_level = rc->bits_off_target;
+}
+
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+                                        double framerate) {
+  // Assume we do not need any constraint below 4K at 20 fps.
+  static const double factor_safe = 3840 * 2160 * 20.0;
+  const double factor = width * height * framerate;
+  const int default_interval =
+      clamp((int)(framerate * 0.125), MIN_GF_INTERVAL, MAX_GF_INTERVAL);
+
+  if (factor <= factor_safe)
+    return default_interval;
+  else
+    return VPXMAX(default_interval,
+                  (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5));
+  // Note this logic makes:
+  // 4K24: 5
+  // 4K30: 6
+  // 4K60: 12
+}
+
+int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
+  int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
+  interval += (interval & 0x01);  // Round up to an even value
+  return VPXMAX(interval, min_gf_interval);
+}
+
+void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
+  int i;
+
+  if (pass == 0 && oxcf->rc_mode == VPX_CBR) {
+    rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
+    rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
+  } else {
+    rc->avg_frame_qindex[KEY_FRAME] =
+        (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+    rc->avg_frame_qindex[INTER_FRAME] =
+        (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+  }
+
+  rc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
+  rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q;
+
+  rc->buffer_level = rc->starting_buffer_level;
+  rc->bits_off_target = rc->starting_buffer_level;
+
+  rc->rolling_target_bits = rc->avg_frame_bandwidth;
+  rc->rolling_actual_bits = rc->avg_frame_bandwidth;
+  rc->long_rolling_target_bits = rc->avg_frame_bandwidth;
+  rc->long_rolling_actual_bits = rc->avg_frame_bandwidth;
+
+  rc->total_actual_bits = 0;
+  rc->total_target_bits = 0;
+  rc->total_target_vs_actual = 0;
+
+  rc->frames_since_key = 8;  // Sensible default for first frame.
+  rc->this_key_frame_forced = 0;
+  rc->next_key_frame_forced = 0;
+  rc->source_alt_ref_pending = 0;
+  rc->source_alt_ref_active = 0;
+
+  rc->frames_till_gf_update_due = 0;
+  rc->ni_av_qi = oxcf->worst_allowed_q;
+  rc->ni_tot_qi = 0;
+  rc->ni_frames = 0;
+
+  rc->tot_q = 0.0;
+  rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
+
+  for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
+    rc->rate_correction_factors[i] = 1.0;
+  }
+
+  rc->min_gf_interval = oxcf->min_gf_interval;
+  rc->max_gf_interval = oxcf->max_gf_interval;
+  if (rc->min_gf_interval == 0)
+    rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+        oxcf->width, oxcf->height, oxcf->init_framerate);
+  if (rc->max_gf_interval == 0)
+    rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+        oxcf->init_framerate, rc->min_gf_interval);
+  rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
+}
+
+int vp10_rc_drop_frame(VP10_COMP *cpi) {
+  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  if (!oxcf->drop_frames_water_mark) {
+    return 0;
+  } else {
+    if (rc->buffer_level < 0) {
+      // Always drop if buffer is below 0.
+      return 1;
+    } else {
+      // If buffer is below drop_mark, for now just drop every other frame
+      // (starting with the next frame) until it increases back over drop_mark.
+      int drop_mark =
+          (int)(oxcf->drop_frames_water_mark * rc->optimal_buffer_level / 100);
+      if ((rc->buffer_level > drop_mark) && (rc->decimation_factor > 0)) {
+        --rc->decimation_factor;
+      } else if (rc->buffer_level <= drop_mark && rc->decimation_factor == 0) {
+        rc->decimation_factor = 1;
+      }
+      if (rc->decimation_factor > 0) {
+        if (rc->decimation_count > 0) {
+          --rc->decimation_count;
+          return 1;
+        } else {
+          rc->decimation_count = rc->decimation_factor;
+          return 0;
+        }
+      } else {
+        rc->decimation_count = 0;
+        return 0;
+      }
+    }
+  }
+}
+
+static double get_rate_correction_factor(const VP10_COMP *cpi) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  double rcf;
+
+  if (cpi->common.frame_type == KEY_FRAME) {
+    rcf = rc->rate_correction_factors[KF_STD];
+  } else if (cpi->oxcf.pass == 2) {
+    RATE_FACTOR_LEVEL rf_lvl =
+        cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+    rcf = rc->rate_correction_factors[rf_lvl];
+  } else {
+    if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
+        !rc->is_src_frame_alt_ref &&
+        (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+      rcf = rc->rate_correction_factors[GF_ARF_STD];
+    else
+      rcf = rc->rate_correction_factors[INTER_NORMAL];
+  }
+  rcf *= rcf_mult[rc->frame_size_selector];
+  return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
+}
+
+static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  // Normalize RCF to account for the size-dependent scaling factor.
+  factor /= rcf_mult[cpi->rc.frame_size_selector];
+
+  factor = fclamp(factor, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
+
+  if (cpi->common.frame_type == KEY_FRAME) {
+    rc->rate_correction_factors[KF_STD] = factor;
+  } else if (cpi->oxcf.pass == 2) {
+    RATE_FACTOR_LEVEL rf_lvl =
+        cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+    rc->rate_correction_factors[rf_lvl] = factor;
+  } else {
+    if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
+        !rc->is_src_frame_alt_ref &&
+        (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20))
+      rc->rate_correction_factors[GF_ARF_STD] = factor;
+    else
+      rc->rate_correction_factors[INTER_NORMAL] = factor;
+  }
+}
+
+void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  int correction_factor = 100;
+  double rate_correction_factor = get_rate_correction_factor(cpi);
+  double adjustment_limit;
+
+  int projected_size_based_on_q = 0;
+
+  // Do not update the rate factors for arf overlay frames.
+  if (cpi->rc.is_src_frame_alt_ref) return;
+
+  // Clear down MMX registers to allow floating point in what follows.
+  vpx_clear_system_state();
+
+  // Work out how big we would have expected the frame to be at this Q given
+  // the current correction factor.
+  // Stay in double to avoid int overflow when values are large
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) {
+    projected_size_based_on_q =
+        vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
+  } else {
+    projected_size_based_on_q =
+        vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
+                                cm->MBs, rate_correction_factor, cm->bit_depth);
+  }
+  // Work out a size correction factor.
+  if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
+    correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) /
+                              projected_size_based_on_q);
+
+  // A more heavily damped adjustment is used if we have been oscillating on
+  // either side of the target.
+  adjustment_limit =
+      0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
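+  // E.g. a frame that came out 20% over target (correction_factor == 120)
+  // yields adjustment_limit = 0.25 + 0.5 * log10(1.2) ~= 0.29, so the
+  // correction applied below is ~105/100 rather than the full 120/100.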
+
+  cpi->rc.q_2_frame = cpi->rc.q_1_frame;
+  cpi->rc.q_1_frame = cm->base_qindex;
+  cpi->rc.rc_2_frame = cpi->rc.rc_1_frame;
+  if (correction_factor > 110)
+    cpi->rc.rc_1_frame = -1;
+  else if (correction_factor < 90)
+    cpi->rc.rc_1_frame = 1;
+  else
+    cpi->rc.rc_1_frame = 0;
+
+  if (correction_factor > 102) {
+    // We are not already at the worst allowable quality
+    correction_factor =
+        (int)(100 + ((correction_factor - 100) * adjustment_limit));
+    rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
+    // Keep rate_correction_factor within limits
+    if (rate_correction_factor > MAX_BPB_FACTOR)
+      rate_correction_factor = MAX_BPB_FACTOR;
+  } else if (correction_factor < 99) {
+    // We are not already at the best allowable quality
+    correction_factor =
+        (int)(100 - ((100 - correction_factor) * adjustment_limit));
+    rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
+
+    // Keep rate_correction_factor within limits
+    if (rate_correction_factor < MIN_BPB_FACTOR)
+      rate_correction_factor = MIN_BPB_FACTOR;
+  }
+
+  set_rate_correction_factor(cpi, rate_correction_factor);
+}
+
+int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
+                       int active_best_quality, int active_worst_quality) {
+  const VP10_COMMON *const cm = &cpi->common;
+  int q = active_worst_quality;
+  int last_error = INT_MAX;
+  int i, target_bits_per_mb, bits_per_mb_at_this_q;
+  const double correction_factor = get_rate_correction_factor(cpi);
+
+  // Calculate required scaling factor based on target frame size and size of
+  // frame produced using previous Q.
+  target_bits_per_mb =
+      ((uint64_t)target_bits_per_frame << BPER_MB_NORMBITS) / cm->MBs;
+
+  i = active_best_quality;
+
+  do {
+    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
+      bits_per_mb_at_this_q =
+          (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
+    } else {
+      bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+          cm->frame_type, i, correction_factor, cm->bit_depth);
+    }
+
+    if (bits_per_mb_at_this_q <= target_bits_per_mb) {
+      if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
+        q = i;
+      else
+        q = i - 1;
+
+      break;
+    } else {
+      last_error = bits_per_mb_at_this_q - target_bits_per_mb;
+    }
+  } while (++i <= active_worst_quality);
+
+  // In CBR mode, this makes sure q is between oscillating Qs to prevent
+  // resonance.
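+  // E.g. if the last two frames alternated direction (rc_1_frame *
+  // rc_2_frame == -1) at q indices 40 and 50, q is confined to [40, 50].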
+  if (cpi->oxcf.rc_mode == VPX_CBR &&
+      (cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) &&
+      cpi->rc.q_1_frame != cpi->rc.q_2_frame) {
+    q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame),
+              VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame));
+  }
+  return q;
+}
+
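+// Blend between the low- and high-motion minq tables based on gfu_boost:
+// boosts above 'high' take the low-motion entry, boosts below 'low' take
+// the high-motion entry, and values in between interpolate linearly with
+// rounding.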
+static int get_active_quality(int q, int gfu_boost, int low, int high,
+                              int *low_motion_minq, int *high_motion_minq) {
+  if (gfu_boost > high) {
+    return low_motion_minq[q];
+  } else if (gfu_boost < low) {
+    return high_motion_minq[q];
+  } else {
+    const int gap = high - low;
+    const int offset = high - gfu_boost;
+    const int qdiff = high_motion_minq[q] - low_motion_minq[q];
+    const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap;
+    return low_motion_minq[q] + adjustment;
+  }
+}
+
+static int get_kf_active_quality(const RATE_CONTROL *const rc, int q,
+                                 vpx_bit_depth_t bit_depth) {
+  int *kf_low_motion_minq;
+  int *kf_high_motion_minq;
+  ASSIGN_MINQ_TABLE(bit_depth, kf_low_motion_minq);
+  ASSIGN_MINQ_TABLE(bit_depth, kf_high_motion_minq);
+  return get_active_quality(q, rc->kf_boost, kf_low, kf_high,
+                            kf_low_motion_minq, kf_high_motion_minq);
+}
+
+static int get_gf_active_quality(const RATE_CONTROL *const rc, int q,
+                                 vpx_bit_depth_t bit_depth) {
+  int *arfgf_low_motion_minq;
+  int *arfgf_high_motion_minq;
+  ASSIGN_MINQ_TABLE(bit_depth, arfgf_low_motion_minq);
+  ASSIGN_MINQ_TABLE(bit_depth, arfgf_high_motion_minq);
+  return get_active_quality(q, rc->gfu_boost, gf_low, gf_high,
+                            arfgf_low_motion_minq, arfgf_high_motion_minq);
+}
+
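+// Derive the one-pass VBR active worst quality from recent Q history: key
+// frames allow up to twice the last key-frame Q, GF/ARF updates up to the
+// last inter Q, and other inter frames up to twice the last inter Q (with
+// special cases for the first two frames), all capped at worst_quality.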
+static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const unsigned int curr_frame = cpi->common.current_video_frame;
+  int active_worst_quality;
+
+  if (cpi->common.frame_type == KEY_FRAME) {
+    active_worst_quality =
+        curr_frame == 0 ? rc->worst_quality : rc->last_q[KEY_FRAME] * 2;
+  } else {
+    if (!rc->is_src_frame_alt_ref &&
+        (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+      active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4
+                                             : rc->last_q[INTER_FRAME];
+    } else {
+      active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 2
+                                             : rc->last_q[INTER_FRAME] * 2;
+    }
+  }
+  return VPXMIN(active_worst_quality, rc->worst_quality);
+}
+
+// Adjust active_worst_quality level based on buffer level.
+static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
+  // Adjust active_worst_quality: If buffer is above the optimal/target level,
+  // bring active_worst_quality down depending on fullness of buffer.
+  // If buffer is below the optimal level, let the active_worst_quality go from
+  // ambient Q (at buffer = optimal level) to worst_quality level
+  // (at buffer = critical level).
+  const VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *rc = &cpi->rc;
+  // Buffer level below which we push active_worst to worst_quality.
+  int64_t critical_level = rc->optimal_buffer_level >> 3;
+  int64_t buff_lvl_step = 0;
+  int adjustment = 0;
+  int active_worst_quality;
+  int ambient_qp;
+  if (cm->frame_type == KEY_FRAME) return rc->worst_quality;
+  // For ambient_qp we use the minimum of avg_frame_qindex[KEY_FRAME] and
+  // avg_frame_qindex[INTER_FRAME] for the first few frames following a key
+  // frame. These are both initialized to worst_quality and updated with a
+  // (3/4, 1/4) average in postencode_update, so for the first few frames
+  // following a key frame, the qp of that key frame is weighted into the
+  // active_worst_quality setting.
+  ambient_qp = (cm->current_video_frame < 5)
+                   ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+                            rc->avg_frame_qindex[KEY_FRAME])
+                   : rc->avg_frame_qindex[INTER_FRAME];
+  active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4);
+  if (rc->buffer_level > rc->optimal_buffer_level) {
+    // Adjust down.
+    // Maximum limit for down adjustment, ~30%.
+    int max_adjustment_down = active_worst_quality / 3;
+    if (max_adjustment_down) {
+      buff_lvl_step = ((rc->maximum_buffer_size - rc->optimal_buffer_level) /
+                       max_adjustment_down);
+      if (buff_lvl_step)
+        adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) /
+                           buff_lvl_step);
+      active_worst_quality -= adjustment;
+    }
+  } else if (rc->buffer_level > critical_level) {
+    // Adjust up from ambient Q.
+    if (critical_level) {
+      buff_lvl_step = (rc->optimal_buffer_level - critical_level);
+      if (buff_lvl_step) {
+        adjustment = (int)((rc->worst_quality - ambient_qp) *
+                           (rc->optimal_buffer_level - rc->buffer_level) /
+                           buff_lvl_step);
+      }
+      active_worst_quality = ambient_qp + adjustment;
+    }
+  } else {
+    // Set to worst_quality if buffer is below critical level.
+    active_worst_quality = rc->worst_quality;
+  }
+  return active_worst_quality;
+}
+
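+// Pick the frame Q and the [bottom_index, top_index] bounds for one-pass
+// CBR coding: intra frames derive active_best from the kf minq tables
+// (forced key frames from a range around the last boosted Q), inter frames
+// from the arf/gf or rtc minq tables.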
+static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
+                                             int *bottom_index,
+                                             int *top_index) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  int active_best_quality;
+  int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
+  int q;
+  int *rtc_minq;
+  ASSIGN_MINQ_TABLE(cm->bit_depth, rtc_minq);
+
+  if (frame_is_intra_only(cm)) {
+    active_best_quality = rc->best_quality;
+    // Handle the special case for key frames forced when we have reached
+    // the maximum key frame interval. Here force the Q to a range
+    // based on the ambient Q to reduce the risk of popping.
+    if (rc->this_key_frame_forced) {
+      int qindex = rc->last_boosted_qindex;
+      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(
+          rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
+      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+    } else if (cm->current_video_frame > 0) {
+      // Not the first frame of a one-pass encode, and kf_boost is set.
+      double q_adj_factor = 1.0;
+      double q_val;
+
+      active_best_quality = get_kf_active_quality(
+          rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
+
+      // Allow somewhat lower kf minq with small image formats.
+      if ((cm->width * cm->height) <= (352 * 288)) {
+        q_adj_factor -= 0.25;
+      }
+
+      // Convert the adjustment factor to a qindex delta
+      // on active_best_quality.
+      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+    }
+  } else if (!rc->is_src_frame_alt_ref &&
+             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+    // Use the lower of active_worst_quality and the recent average Q as the
+    // basis for the GF/ARF best Q limit unless the last frame was a key
+    // frame.
+    if (rc->frames_since_key > 1 &&
+        rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+      q = rc->avg_frame_qindex[INTER_FRAME];
+    } else {
+      q = active_worst_quality;
+    }
+    active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+  } else {
+    // Use the lower of active_worst_quality and recent/average Q.
+    if (cm->current_video_frame > 1) {
+      if (rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality)
+        active_best_quality = rtc_minq[rc->avg_frame_qindex[INTER_FRAME]];
+      else
+        active_best_quality = rtc_minq[active_worst_quality];
+    } else {
+      if (rc->avg_frame_qindex[KEY_FRAME] < active_worst_quality)
+        active_best_quality = rtc_minq[rc->avg_frame_qindex[KEY_FRAME]];
+      else
+        active_best_quality = rtc_minq[active_worst_quality];
+    }
+  }
+
+  // Clip the active best and worst quality values to limits
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
+
+  *top_index = active_worst_quality;
+  *bottom_index = active_best_quality;
+
+  // Limit Q range for the adaptive loop.
+  if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
+      !(cm->current_video_frame == 0)) {
+    int qdelta = 0;
+    vpx_clear_system_state();
+    qdelta = vp10_compute_qdelta_by_rate(
+        &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
+    *top_index = active_worst_quality + qdelta;
+    *top_index = VPXMAX(*top_index, *bottom_index);
+  }
+
+  // Special case code to try to match quality with forced key frames.
+  if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
+    q = rc->last_boosted_qindex;
+  } else {
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
+    if (q > *top_index) {
+      // Special case when we are targeting the max allowed rate
+      if (rc->this_frame_target >= rc->max_frame_bandwidth)
+        *top_index = q;
+      else
+        q = *top_index;
+    }
+  }
+
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
+  assert(*bottom_index <= rc->worst_quality &&
+         *bottom_index >= rc->best_quality);
+  assert(q <= rc->worst_quality && q >= rc->best_quality);
+  return q;
+}
+
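+// In VPX_CQ mode, scale the configured cq_level down once actual spend
+// falls below 10% of the target, e.g. actual/target == 0.05 halves the
+// active level.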
+static int get_active_cq_level(const RATE_CONTROL *rc,
+                               const VP10EncoderConfig *const oxcf) {
+  static const double cq_adjust_threshold = 0.1;
+  int active_cq_level = oxcf->cq_level;
+  if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
+    const double x = (double)rc->total_actual_bits / rc->total_target_bits;
+    if (x < cq_adjust_threshold) {
+      active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
+    }
+  }
+  return active_cq_level;
+}
+
+static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
+                                             int *bottom_index,
+                                             int *top_index) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const int cq_level = get_active_cq_level(rc, oxcf);
+  int active_best_quality;
+  int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
+  int q;
+  int *inter_minq;
+  ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
+
+  if (frame_is_intra_only(cm)) {
+    if (oxcf->rc_mode == VPX_Q) {
+      int qindex = cq_level;
+      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
+      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+    } else if (rc->this_key_frame_forced) {
+      int qindex = rc->last_boosted_qindex;
+      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = vp10_compute_qdelta(
+          rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
+      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+    } else {
+      // Not the first frame of a one-pass encode, and kf_boost is set.
+      double q_adj_factor = 1.0;
+      double q_val;
+
+      active_best_quality = get_kf_active_quality(
+          rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
+
+      // Allow somewhat lower kf minq with small image formats.
+      if ((cm->width * cm->height) <= (352 * 288)) {
+        q_adj_factor -= 0.25;
+      }
+
+      // Convert the adjustment factor to a qindex delta
+      // on active_best_quality.
+      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+    }
+  } else if (!rc->is_src_frame_alt_ref &&
+             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+    // Use the lower of active_worst_quality and the recent average Q as the
+    // basis for the GF/ARF best Q limit unless the last frame was a key
+    // frame.
+    if (rc->frames_since_key > 1 &&
+        rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+      q = rc->avg_frame_qindex[INTER_FRAME];
+    } else {
+      q = rc->avg_frame_qindex[KEY_FRAME];
+    }
+    // For constrained quality, don't allow Q less than the cq level.
+    if (oxcf->rc_mode == VPX_CQ) {
+      if (q < cq_level) q = cq_level;
+
+      active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+
+      // Constrained quality uses a slightly lower active best.
+      active_best_quality = active_best_quality * 15 / 16;
+
+    } else if (oxcf->rc_mode == VPX_Q) {
+      int qindex = cq_level;
+      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex;
+      if (cpi->refresh_alt_ref_frame)
+        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
+      else
+        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
+      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+    } else {
+      active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+    }
+  } else {
+    if (oxcf->rc_mode == VPX_Q) {
+      int qindex = cq_level;
+      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
+                                               0.70, 1.0, 0.85, 1.0 };
+      int delta_qindex = vp10_compute_qdelta(
+          rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
+          cm->bit_depth);
+      active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+    } else {
+      // Use the lower of active_worst_quality and recent/average Q.
+      if (cm->current_video_frame > 1)
+        active_best_quality = inter_minq[rc->avg_frame_qindex[INTER_FRAME]];
+      else
+        active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
+      // For the constrained quality mode we don't want
+      // q to fall below the cq level.
+      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+        active_best_quality = cq_level;
+      }
+    }
+  }
+
+  // Clip the active best and worst quality values to limits
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
+
+  *top_index = active_worst_quality;
+  *bottom_index = active_best_quality;
+
+  // Limit Q range for the adaptive loop.
+  {
+    int qdelta = 0;
+    vpx_clear_system_state();
+    if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
+        !(cm->current_video_frame == 0)) {
+      qdelta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
+    } else if (!rc->is_src_frame_alt_ref &&
+               (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+      qdelta = vp10_compute_qdelta_by_rate(
+          &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
+    }
+    *top_index = active_worst_quality + qdelta;
+    *top_index = VPXMAX(*top_index, *bottom_index);
+  }
+
+  if (oxcf->rc_mode == VPX_Q) {
+    q = active_best_quality;
+    // Special case code to try to match quality with forced key frames.
+  } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
+    q = rc->last_boosted_qindex;
+  } else {
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
+    if (q > *top_index) {
+      // Special case when we are targeting the max allowed rate
+      if (rc->this_frame_target >= rc->max_frame_bandwidth)
+        *top_index = q;
+      else
+        q = *top_index;
+    }
+  }
+
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
+  assert(*bottom_index <= rc->worst_quality &&
+         *bottom_index >= rc->best_quality);
+  assert(q <= rc->worst_quality && q >= rc->best_quality);
+  return q;
+}
+
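+// Map a rate-factor level to a qdelta at the given q: boosted levels (e.g.
+// KF_STD at 2.0) target a higher bits-per-frame ratio, which translates
+// into a negative qindex delta.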
+int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
+  static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = {
+    1.00,  // INTER_NORMAL
+#if CONFIG_EXT_REFS
+    0.80,  // INTER_LOW
+    1.50,  // INTER_HIGH
+    1.25,  // GF_ARF_LOW
+#else
+    1.00,  // INTER_HIGH
+    1.50,  // GF_ARF_LOW
+#endif     // CONFIG_EXT_REFS
+    2.00,  // GF_ARF_STD
+    2.00,  // KF_STD
+  };
+  static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] =
+#if CONFIG_EXT_REFS
+      { INTER_FRAME, INTER_FRAME, INTER_FRAME,
+        INTER_FRAME, INTER_FRAME, KEY_FRAME };
+#else
+      { INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME };
+#endif  // CONFIG_EXT_REFS
+  const VP10_COMMON *const cm = &cpi->common;
+  int qdelta =
+      vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+                                  rate_factor_deltas[rf_level], cm->bit_depth);
+  return qdelta;
+}
+
+#define STATIC_MOTION_THRESH 95
+static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
+                                         int *bottom_index, int *top_index) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const GF_GROUP *gf_group = &cpi->twopass.gf_group;
+  const int cq_level = get_active_cq_level(rc, oxcf);
+  int active_best_quality;
+  int active_worst_quality = cpi->twopass.active_worst_quality;
+  int q;
+  int *inter_minq;
+  ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq);
+
+  if (frame_is_intra_only(cm)) {
+    // Handle the special case for key frames forced when we have reached
+    // the maximum key frame interval. Here force the Q to a range
+    // based on the ambient Q to reduce the risk of popping.
+    if (rc->this_key_frame_forced) {
+      double last_boosted_q;
+      int delta_qindex;
+      int qindex;
+
+      if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
+        qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+        active_best_quality = qindex;
+        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = vp10_compute_qdelta(
+            rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
+        active_worst_quality =
+            VPXMIN(qindex + delta_qindex, active_worst_quality);
+      } else {
+        qindex = rc->last_boosted_qindex;
+        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = vp10_compute_qdelta(
+            rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
+        active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
+      }
+    } else {
+      // Not a forced keyframe.
+      double q_adj_factor = 1.0;
+      double q_val;
+
+      // Baseline value derived from cpi->active_worst_quality and kf boost.
+      active_best_quality =
+          get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
+
+      // Allow somewhat lower kf minq with small image formats.
+      if ((cm->width * cm->height) <= (352 * 288)) {
+        q_adj_factor -= 0.25;
+      }
+
+      // Make a further adjustment based on the kf zero motion measure.
+      q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct);
+
+      // Convert the adjustment factor to a qindex delta
+      // on active_best_quality.
+      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      active_best_quality +=
+          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+    }
+  } else if (!rc->is_src_frame_alt_ref &&
+             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+    // Use the lower of active_worst_quality and the recent average Q as the
+    // basis for the GF/ARF best Q limit unless the last frame was a key
+    // frame.
+    if (rc->frames_since_key > 1 &&
+        rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+      q = rc->avg_frame_qindex[INTER_FRAME];
+    } else {
+      q = active_worst_quality;
+    }
+    // For constrained quality, don't allow Q less than the cq level.
+    if (oxcf->rc_mode == VPX_CQ) {
+      if (q < cq_level) q = cq_level;
+
+      active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+
+      // Constrained quality uses a slightly lower active best.
+      active_best_quality = active_best_quality * 15 / 16;
+
+    } else if (oxcf->rc_mode == VPX_Q) {
+      if (!cpi->refresh_alt_ref_frame) {
+        active_best_quality = cq_level;
+      } else {
+        active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+
+        // Modify best quality for second level arfs. For mode VPX_Q this
+        // becomes the baseline frame q.
+        if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)
+          active_best_quality = (active_best_quality + cq_level + 1) / 2;
+      }
+    } else {
+      active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+    }
+  } else {
+    if (oxcf->rc_mode == VPX_Q) {
+      active_best_quality = cq_level;
+    } else {
+      active_best_quality = inter_minq[active_worst_quality];
+
+      // For the constrained quality mode we don't want
+      // q to fall below the cq level.
+      if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
+        active_best_quality = cq_level;
+      }
+    }
+  }
+
+  // Extension to max or min Q if undershoot or overshoot is outside
+  // the permitted range.
+  if ((cpi->oxcf.rc_mode != VPX_Q) &&
+      (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD)) {
+    if (frame_is_intra_only(cm) ||
+        (!rc->is_src_frame_alt_ref &&
+         (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
+      active_best_quality -=
+          (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
+      active_worst_quality += (cpi->twopass.extend_maxq / 2);
+    } else {
+      active_best_quality -=
+          (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
+      active_worst_quality += cpi->twopass.extend_maxq;
+    }
+  }
+
+  vpx_clear_system_state();
+  // Q restrictions for static forced key frames are dealt with elsewhere.
+  if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
+      (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
+    int qdelta = vp10_frame_type_qdelta(
+        cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
+    active_worst_quality =
+        VPXMAX(active_worst_quality + qdelta, active_best_quality);
+  }
+
+  // Modify active_best_quality for downscaled normal frames.
+  if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
+    int qdelta = vp10_compute_qdelta_by_rate(
+        rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
+    active_best_quality =
+        VPXMAX(active_best_quality + qdelta, rc->best_quality);
+  }
+
+  active_best_quality =
+      clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+  active_worst_quality =
+      clamp(active_worst_quality, active_best_quality, rc->worst_quality);
+
+  if (oxcf->rc_mode == VPX_Q) {
+    q = active_best_quality;
+    // Special case code to try to match quality with forced key frames.
+  } else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
+    // If static since the last kf, use the better of the last boosted and
+    // last kf q.
+    if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
+      q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
+    } else {
+      q = rc->last_boosted_qindex;
+    }
+  } else {
+    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+                           active_worst_quality);
+    if (q > active_worst_quality) {
+      // Special case when we are targeting the max allowed rate.
+      if (rc->this_frame_target >= rc->max_frame_bandwidth)
+        active_worst_quality = q;
+      else
+        q = active_worst_quality;
+    }
+  }
+  q = clamp(q, active_best_quality, active_worst_quality);
+
+  *top_index = active_worst_quality;
+  *bottom_index = active_best_quality;
+
+  assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
+  assert(*bottom_index <= rc->worst_quality &&
+         *bottom_index >= rc->best_quality);
+  assert(q <= rc->worst_quality && q >= rc->best_quality);
+  return q;
+}
+
+int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
+                              int *top_index) {
+  int q;
+  if (cpi->oxcf.pass == 0) {
+    if (cpi->oxcf.rc_mode == VPX_CBR)
+      q = rc_pick_q_and_bounds_one_pass_cbr(cpi, bottom_index, top_index);
+    else
+      q = rc_pick_q_and_bounds_one_pass_vbr(cpi, bottom_index, top_index);
+  } else {
+    q = rc_pick_q_and_bounds_two_pass(cpi, bottom_index, top_index);
+  }
+
+  return q;
+}
+
+void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
+                                       int *frame_under_shoot_limit,
+                                       int *frame_over_shoot_limit) {
+  if (cpi->oxcf.rc_mode == VPX_Q) {
+    *frame_under_shoot_limit = 0;
+    *frame_over_shoot_limit = INT_MAX;
+  } else {
+    // For very small rate targets, where the fractional adjustment may be
+    // tiny, make sure there is at least a minimum range.
+    const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
+    *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
+    *frame_over_shoot_limit =
+        VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
+  }
+}
+
+void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
+  const VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+
+  rc->this_frame_target = target;
+
+  // Modify frame size target when down-scaling.
+  if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
+      rc->frame_size_selector != UNSCALED)
+    rc->this_frame_target = (int)(rc->this_frame_target *
+                                  rate_thresh_mult[rc->frame_size_selector]);
+
+  // Target rate per SB64 (including partial SB64s).
+  rc->sb64_target_rate =
+      ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
+}
+
+static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
+  // This frame refreshes, so subsequent frames don't unless specified by the
+  // user.
+  RATE_CONTROL *const rc = &cpi->rc;
+  rc->frames_since_golden = 0;
+
+  // Mark the alt ref as done (setting to 0 means no further alt refs pending).
+  rc->source_alt_ref_pending = 0;
+
+  // Set the alternate reference frame active flag
+  rc->source_alt_ref_active = 1;
+}
+
+static void update_golden_frame_stats(VP10_COMP *cpi) {
+  RATE_CONTROL *const rc = &cpi->rc;
+
+#if CONFIG_EXT_REFS
+  // Update the Golden frame usage counts.
+  // NOTE(weitinglin): If we use show_existing_frame for an OVERLAY frame,
+  //                   only the virtual indices for the reference frame will be
+  //                   updated and cpi->refresh_golden_frame will still be zero.
+  if (cpi->refresh_golden_frame || rc->is_src_frame_alt_ref) {
+#else
+  // Update the Golden frame usage counts.
+  if (cpi->refresh_golden_frame) {
+#endif
+#if CONFIG_EXT_REFS
+    // We will not use internal overlay frames to replace the golden frame
+    if (!rc->is_src_frame_ext_arf)
+#endif
+      // This frame refreshes, so subsequent frames don't unless specified by
+      // the user.
+      rc->frames_since_golden = 0;
+
+    // If we are not using an alt ref in the upcoming group, clear the arf
+    // active flag. In the multi-arf group case, if the index is not 0 then
+    // we are overlaying a mid-group arf, so we should not reset the flag.
+    if (cpi->oxcf.pass == 2) {
+      if (!rc->source_alt_ref_pending && (cpi->twopass.gf_group.index == 0))
+        rc->source_alt_ref_active = 0;
+    } else if (!rc->source_alt_ref_pending) {
+      rc->source_alt_ref_active = 0;
+    }
+
+    // Decrement the countdown till the next gf.
+    if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
+
+  } else if (!cpi->refresh_alt_ref_frame) {
+    // Decrement the countdown till the next gf.
+    if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
+
+    rc->frames_since_golden++;
+  }
+}
+
+void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  RATE_CONTROL *const rc = &cpi->rc;
+  const int qindex = cm->base_qindex;
+
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
+    vp10_cyclic_refresh_postencode(cpi);
+  }
+
+  // Update rate control heuristics
+  rc->projected_frame_size = (int)(bytes_used << 3);
+
+  // Post encode loop adjustment of Q prediction.
+  vp10_rc_update_rate_correction_factors(cpi);
+
+  // Keep a record of last Q and ambient average Q.
+  if (cm->frame_type == KEY_FRAME) {
+    rc->last_q[KEY_FRAME] = qindex;
+    rc->avg_frame_qindex[KEY_FRAME] =
+        ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[KEY_FRAME] + qindex, 2);
+  } else {
+    if (!rc->is_src_frame_alt_ref &&
+        !(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+      rc->last_q[INTER_FRAME] = qindex;
+      rc->avg_frame_qindex[INTER_FRAME] =
+          ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
+      rc->ni_frames++;
+      rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      rc->avg_q = rc->tot_q / rc->ni_frames;
+      // Calculate the average Q for normal inter frames (not key or GFU
+      // frames).
+      rc->ni_tot_qi += qindex;
+      rc->ni_av_qi = rc->ni_tot_qi / rc->ni_frames;
+    }
+  }
+
+  // Keep a record of the last boosted (KF/GF/ARF) Q value.
+  // If the current frame is coded at a lower Q, we also update it.
+  // If all mbs in this group are skipped, only update if the Q value is
+  // better than that already stored.
+  // This is used to help set quality in forced key frames to reduce popping.
+  if ((qindex < rc->last_boosted_qindex) || (cm->frame_type == KEY_FRAME) ||
+      (!rc->constrained_gf_group &&
+       (cpi->refresh_alt_ref_frame ||
+        (cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
+    rc->last_boosted_qindex = qindex;
+  }
+  if (cm->frame_type == KEY_FRAME) rc->last_kf_qindex = qindex;
+
+  update_buffer_level(cpi, rc->projected_frame_size);
+
+  // Rolling monitors of whether we are over- or under-spending, used to
+  // help regulate min and max Q in two-pass mode.
+  if (cm->frame_type != KEY_FRAME) {
+    rc->rolling_target_bits = ROUND_POWER_OF_TWO(
+        rc->rolling_target_bits * 3 + rc->this_frame_target, 2);
+    rc->rolling_actual_bits = ROUND_POWER_OF_TWO(
+        rc->rolling_actual_bits * 3 + rc->projected_frame_size, 2);
+    rc->long_rolling_target_bits = ROUND_POWER_OF_TWO(
+        rc->long_rolling_target_bits * 31 + rc->this_frame_target, 5);
+    rc->long_rolling_actual_bits = ROUND_POWER_OF_TWO(
+        rc->long_rolling_actual_bits * 31 + rc->projected_frame_size, 5);
+  }
+
+  // Actual bits spent
+  rc->total_actual_bits += rc->projected_frame_size;
+#if CONFIG_EXT_REFS
+  rc->total_target_bits +=
+      (cm->show_frame || rc->is_bwd_ref_frame) ? rc->avg_frame_bandwidth : 0;
+#else
+  rc->total_target_bits += cm->show_frame ? rc->avg_frame_bandwidth : 0;
+#endif  // CONFIG_EXT_REFS
+
+  rc->total_target_vs_actual = rc->total_actual_bits - rc->total_target_bits;
+
+  if (is_altref_enabled(cpi) && cpi->refresh_alt_ref_frame &&
+      (cm->frame_type != KEY_FRAME))
+    // Update the alternate reference frame stats as appropriate.
+    update_alt_ref_frame_stats(cpi);
+  else
+    // Update the Golden frame stats as appropriate.
+    update_golden_frame_stats(cpi);
+
+  if (cm->frame_type == KEY_FRAME) rc->frames_since_key = 0;
+
+#if CONFIG_EXT_REFS
+  if (cm->show_frame || rc->is_bwd_ref_frame) {
+#else
+  if (cm->show_frame) {
+#endif  // CONFIG_EXT_REFS
+    rc->frames_since_key++;
+    rc->frames_to_key--;
+  }
+
+  // Trigger the resizing of the next frame if it is scaled.
+  if (oxcf->pass != 0) {
+    cpi->resize_pending =
+        rc->next_frame_size_selector != rc->frame_size_selector;
+    rc->frame_size_selector = rc->next_frame_size_selector;
+  }
+}
+
+void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
+  // Update buffer level with zero size, update frame counters, and return.
+  update_buffer_level(cpi, 0);
+  cpi->rc.frames_since_key++;
+  cpi->rc.frames_to_key--;
+  cpi->rc.rc_2_frame = 0;
+  cpi->rc.rc_1_frame = 0;
+}
+
+// Use this macro to turn on/off use of alt-refs in one-pass mode.
+#define USE_ALTREF_FOR_ONE_PASS 1
+
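+// Give boosted (GF/ARF) frames af_ratio times the bits of other frames in
+// the group. E.g. with af_ratio = 10 and an 8-frame interval, a boosted
+// frame targets 80/17 of avg_frame_bandwidth and the others 8/17 each.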
+static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+  static const int af_ratio = 10;
+  const RATE_CONTROL *const rc = &cpi->rc;
+  int target;
+#if USE_ALTREF_FOR_ONE_PASS
+  target =
+      (!rc->is_src_frame_alt_ref &&
+       (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))
+          ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
+                (rc->baseline_gf_interval + af_ratio - 1)
+          : (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
+                (rc->baseline_gf_interval + af_ratio - 1);
+#else
+  target = rc->avg_frame_bandwidth;
+#endif
+  return vp10_rc_clamp_pframe_target_size(cpi, target);
+}
+
+static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+  static const int kf_ratio = 25;
+  const RATE_CONTROL *rc = &cpi->rc;
+  const int target = rc->avg_frame_bandwidth * kf_ratio;
+  return vp10_rc_clamp_iframe_target_size(cpi, target);
+}
+
+void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  int target;
+  // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
+  if (!cpi->refresh_alt_ref_frame &&
+      (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+       rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
+    cm->frame_type = KEY_FRAME;
+    rc->this_key_frame_forced =
+        cm->current_video_frame != 0 && rc->frames_to_key == 0;
+    rc->frames_to_key = cpi->oxcf.key_freq;
+    rc->kf_boost = DEFAULT_KF_BOOST;
+    rc->source_alt_ref_active = 0;
+  } else {
+    cm->frame_type = INTER_FRAME;
+  }
+  if (rc->frames_till_gf_update_due == 0) {
+    rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
+    rc->frames_till_gf_update_due = rc->baseline_gf_interval;
+    // NOTE: frames_till_gf_update_due must be <= frames_to_key.
+    if (rc->frames_till_gf_update_due > rc->frames_to_key) {
+      rc->frames_till_gf_update_due = rc->frames_to_key;
+      rc->constrained_gf_group = 1;
+    } else {
+      rc->constrained_gf_group = 0;
+    }
+    cpi->refresh_golden_frame = 1;
+    rc->source_alt_ref_pending = USE_ALTREF_FOR_ONE_PASS;
+    rc->gfu_boost = DEFAULT_GF_BOOST;
+  }
+  if (cm->frame_type == KEY_FRAME)
+    target = calc_iframe_target_size_one_pass_vbr(cpi);
+  else
+    target = calc_pframe_target_size_one_pass_vbr(cpi);
+  vp10_rc_set_frame_target(cpi, target);
+}
+
+static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const RATE_CONTROL *rc = &cpi->rc;
+  const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
+  const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
+  int min_frame_target =
+      VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS);
+  int target;
+
+  if (oxcf->gf_cbr_boost_pct) {
+    const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100;
+    target = cpi->refresh_golden_frame
+                 ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval *
+                    af_ratio_pct) /
+                       (rc->baseline_gf_interval * 100 + af_ratio_pct - 100)
+                 : (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
+                       (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
+  } else {
+    target = rc->avg_frame_bandwidth;
+  }
+
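+  // Each 1% of buffer deviation from the optimal level moves the target by
+  // 0.5%, capped by the configured under/over-shoot percentages: a buffer
+  // 10% below optimal trims the target by 5%.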
+  if (diff > 0) {
+    // Lower the target bandwidth for this frame.
+    const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct);
+    target -= (target * pct_low) / 200;
+  } else if (diff < 0) {
+    // Increase the target bandwidth for this frame.
+    const int pct_high =
+        (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct);
+    target += (target * pct_high) / 200;
+  }
+  if (oxcf->rc_max_inter_bitrate_pct) {
+    const int max_rate =
+        rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
+    target = VPXMIN(target, max_rate);
+  }
+  return VPXMAX(min_frame_target, target);
+}
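
To make the buffer-tracking arithmetic above concrete, a worked example
(the buffer sizes and the under_shoot_pct value are assumptions):

    /* optimal_buffer_level = 1,000,000 bits, buffer_level = 800,000:
     *   diff         = 200,000            (buffer is 20% below optimal)
     *   one_pct_bits = 1 + 1,000,000/100 = 10,001
     *   pct_low      = min(200,000/10,001, under_shoot_pct) = min(19, ...)
     * With under_shoot_pct >= 19, the target drops by 19/200: each
     * percentage point of buffer shortfall trims the target by 0.5%. */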
+
+static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+  const RATE_CONTROL *rc = &cpi->rc;
+  int target;
+  if (cpi->common.current_video_frame == 0) {
+    target = ((rc->starting_buffer_level / 2) > INT_MAX)
+                 ? INT_MAX
+                 : (int)(rc->starting_buffer_level / 2);
+  } else {
+    int kf_boost = 32;
+    double framerate = cpi->framerate;
+
+    kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
+    if (rc->frames_since_key < framerate / 2) {
+      kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
+    }
+    target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
+  }
+  return vp10_rc_clamp_iframe_target_size(cpi, target);
+}
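
A worked example of the key-frame boost above (framerate = 30 assumed):

    /* kf_boost = max(32, 2*30 - 16) = 44, so a key frame well after the
     * previous one targets (16 + 44)/16 = 3.75x the average frame size.
     * If it lands only 7 frames after a key frame (7 < 30/2), the boost
     * is scaled down: kf_boost = (int)(44 * 7 / 15.0) = 20, giving
     * (16 + 20)/16 = 2.25x the average frame size. */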
+
+void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  int target;
+  // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
+  if ((cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+       rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
+    cm->frame_type = KEY_FRAME;
+    rc->this_key_frame_forced =
+        cm->current_video_frame != 0 && rc->frames_to_key == 0;
+    rc->frames_to_key = cpi->oxcf.key_freq;
+    rc->kf_boost = DEFAULT_KF_BOOST;
+    rc->source_alt_ref_active = 0;
+  } else {
+    cm->frame_type = INTER_FRAME;
+  }
+  if (rc->frames_till_gf_update_due == 0) {
+    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
+      vp10_cyclic_refresh_set_golden_update(cpi);
+    else
+      rc->baseline_gf_interval =
+          (rc->min_gf_interval + rc->max_gf_interval) / 2;
+    rc->frames_till_gf_update_due = rc->baseline_gf_interval;
+    // NOTE: frames_till_gf_update_due must be <= frames_to_key.
+    if (rc->frames_till_gf_update_due > rc->frames_to_key)
+      rc->frames_till_gf_update_due = rc->frames_to_key;
+    cpi->refresh_golden_frame = 1;
+    rc->gfu_boost = DEFAULT_GF_BOOST;
+  }
+
+  // Any update/change of global cyclic refresh parameters (amount/delta-qp)
+  // should be done here, before the frame qp is selected.
+  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
+    vp10_cyclic_refresh_update_parameters(cpi);
+
+  if (cm->frame_type == KEY_FRAME)
+    target = calc_iframe_target_size_one_pass_cbr(cpi);
+  else
+    target = calc_pframe_target_size_one_pass_cbr(cpi);
+
+  vp10_rc_set_frame_target(cpi, target);
+  if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
+    cpi->resize_pending = vp10_resize_one_pass_cbr(cpi);
+  else
+    cpi->resize_pending = 0;
+}
+
+int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+                        vpx_bit_depth_t bit_depth) {
+  int start_index = rc->worst_quality;
+  int target_index = rc->worst_quality;
+  int i;
+
+  // Convert the average q value to an index.
+  for (i = rc->best_quality; i < rc->worst_quality; ++i) {
+    start_index = i;
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
+  }
+
+  // Convert the q target to an index
+  for (i = rc->best_quality; i < rc->worst_quality; ++i) {
+    target_index = i;
+    if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
+  }
+
+  return target_index - start_index;
+}
+
+int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+                                int qindex, double rate_target_ratio,
+                                vpx_bit_depth_t bit_depth) {
+  int target_index = rc->worst_quality;
+  int i;
+
+  // Look up the current projected bits per mb for the base index.
+  const int base_bits_per_mb =
+      vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
+
+  // Find the target bits per mb based on the base value and given ratio.
+  const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
+
+  // Convert the q target to an index
+  for (i = rc->best_quality; i < rc->worst_quality; ++i) {
+    if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
+        target_bits_per_mb) {
+      target_index = i;
+      break;
+    }
+  }
+  return target_index - qindex;
+}
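
A usage sketch for the helper above (rc, qindex and bit_depth are whatever
the caller already holds): asking for half the bits per mb returns a
non-negative delta, since a higher q index spends fewer bits.

    /* Delta that moves qindex to roughly half the projected bits/mb. */
    const int qdelta =
        vp10_compute_qdelta_by_rate(rc, INTER_FRAME, qindex, 0.5, bit_depth);
    /* Apply as qindex + qdelta. */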
+
+void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
+                                   RATE_CONTROL *const rc) {
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+
+  // Special case code for 1 pass fixed Q mode tests
+  if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
+    rc->max_gf_interval = FIXED_GF_INTERVAL;
+    rc->min_gf_interval = FIXED_GF_INTERVAL;
+    rc->static_scene_max_gf_interval = FIXED_GF_INTERVAL;
+  } else {
+    // Set Maximum gf/arf interval
+    rc->max_gf_interval = oxcf->max_gf_interval;
+    rc->min_gf_interval = oxcf->min_gf_interval;
+    if (rc->min_gf_interval == 0)
+      rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+          oxcf->width, oxcf->height, cpi->framerate);
+    if (rc->max_gf_interval == 0)
+      rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+          cpi->framerate, rc->min_gf_interval);
+
+    // Extended interval for genuinely static scenes
+    rc->static_scene_max_gf_interval = MAX_LAG_BUFFERS * 2;
+
+    if (is_altref_enabled(cpi)) {
+      if (rc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1)
+        rc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1;
+    }
+
+    if (rc->max_gf_interval > rc->static_scene_max_gf_interval)
+      rc->max_gf_interval = rc->static_scene_max_gf_interval;
+
+    // Clamp min to max
+    rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval);
+  }
+}
+
+void vp10_rc_update_framerate(VP10_COMP *cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  RATE_CONTROL *const rc = &cpi->rc;
+  int vbr_max_bits;
+
+  rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate);
+  rc->min_frame_bandwidth =
+      (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
+
+  rc->min_frame_bandwidth =
+      VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
+
+  // A maximum bitrate for a frame is defined.
+  // The baseline for this aligns with HW implementations that
+  // can support decode of 1080P content up to a bitrate of MAX_MB_RATE bits
+  // per 16x16 MB (averaged over a frame). However, this limit is extended if
+  // a very high rate is given on the command line or the rate cannot be
+  // achieved because of a user-specified max q (e.g. when the user specifies
+  // a lossless encode).
+  vbr_max_bits =
+      (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
+            100);
+  rc->max_frame_bandwidth =
+      VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
+
+  vp10_rc_set_gf_interval_range(cpi, rc);
+}
+
+#define VBR_PCT_ADJUSTMENT_LIMIT 50
+// For VBR...adjustment to the frame target based on error from previous frames
+static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
+  int max_delta;
+  double position_factor = 1.0;
+
+  // How far through the clip are we?
+  // This number is used to damp the per-frame rate correction.
+  // Range: 0 - 1.0.
+  if (cpi->twopass.total_stats.count != 0.) {
+    position_factor = sqrt((double)cpi->common.current_video_frame /
+                           cpi->twopass.total_stats.count);
+  }
+  max_delta = (int)(position_factor *
+                    ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100));
+
+  // vbr_bits_off_target > 0 means we have extra bits to spend
+  if (vbr_bits_off_target > 0) {
+    *this_frame_target += (vbr_bits_off_target > max_delta)
+                              ? max_delta
+                              : (int)vbr_bits_off_target;
+  } else {
+    *this_frame_target -= (vbr_bits_off_target < -max_delta)
+                              ? max_delta
+                              : (int)-vbr_bits_off_target;
+  }
+
+  // Fast redistribution of bits arising from massive local undershoot.
+  // Don't do it for kf, arf, gf or overlay frames.
+  if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
+      rc->vbr_bits_off_target_fast) {
+    int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target);
+    int fast_extra_bits;
+    fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits);
+    fast_extra_bits = (int)VPXMIN(
+        fast_extra_bits,
+        VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
+    *this_frame_target += (int)fast_extra_bits;
+    rc->vbr_bits_off_target_fast -= fast_extra_bits;
+  }
+}
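
The damping above is easy to check with numbers:

    /* A quarter of the way through the clip:
     *   position_factor = sqrt(0.25) = 0.5
     *   max_delta       = 0.5 * (target * 50 / 100) = 25% of the target.
     * Only at the very end of the clip can the correction reach the full
     * VBR_PCT_ADJUSTMENT_LIMIT of 50%. */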
+
+void vp10_set_target_rate(VP10_COMP *cpi) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  int target_rate = rc->base_frame_target;
+
+  // Correction to rate target based on prior over or under shoot.
+  if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
+    vbr_rate_correction(cpi, &target_rate);
+  vp10_rc_set_frame_target(cpi, target_rate);
+}
+
+// Check if we should resize, based on the average QP over a recent window of
+// frames. Only allow at most one scale-down step for now; the scaling factor
+// is 2.
+int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
+  const VP10_COMMON *const cm = &cpi->common;
+  RATE_CONTROL *const rc = &cpi->rc;
+  int resize_now = 0;
+  cpi->resize_scale_num = 1;
+  cpi->resize_scale_den = 1;
+  // Don't resize on key frame; reset the counters on key frame.
+  if (cm->frame_type == KEY_FRAME) {
+    cpi->resize_avg_qp = 0;
+    cpi->resize_count = 0;
+    return 0;
+  }
+  // Resize based on average buffer underflow and QP over some window.
+  // Ignore samples close to key frame, since QP is usually high after key.
+  if (cpi->rc.frames_since_key > 2 * cpi->framerate) {
+    const int window = (int)(5 * cpi->framerate);
+    cpi->resize_avg_qp += cm->base_qindex;
+    if (cpi->rc.buffer_level < (int)(30 * rc->optimal_buffer_level / 100))
+      ++cpi->resize_buffer_underflow;
+    ++cpi->resize_count;
+    // Check for resize action every "window" frames.
+    if (cpi->resize_count >= window) {
+      int avg_qp = cpi->resize_avg_qp / cpi->resize_count;
+      // Resize down if the buffer level has underflowed by a sufficient
+      // amount in the past window, and we are at the original resolution.
+      // Resize back up if average QP is low, and we are currently in a resized
+      // down state.
+      if (cpi->resize_state == 0 &&
+          cpi->resize_buffer_underflow > (cpi->resize_count >> 2)) {
+        resize_now = 1;
+        cpi->resize_state = 1;
+      } else if (cpi->resize_state == 1 &&
+                 avg_qp < 40 * cpi->rc.worst_quality / 100) {
+        resize_now = -1;
+        cpi->resize_state = 0;
+      }
+      // Reset for next window measurement.
+      cpi->resize_avg_qp = 0;
+      cpi->resize_count = 0;
+      cpi->resize_buffer_underflow = 0;
+    }
+  }
+  // If the decision is to resize, reset some quantities and check if we
+  // should reduce the rate correction factor.
+  if (resize_now != 0) {
+    int target_bits_per_frame;
+    int active_worst_quality;
+    int qindex;
+    int tot_scale_change;
+    // For now, resize is by 1/2 x 1/2.
+    cpi->resize_scale_num = 1;
+    cpi->resize_scale_den = 2;
+    tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) /
+                       (cpi->resize_scale_num * cpi->resize_scale_num);
+    // Reset buffer level to optimal, update target size.
+    rc->buffer_level = rc->optimal_buffer_level;
+    rc->bits_off_target = rc->optimal_buffer_level;
+    rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
+    // Reset cyclic refresh parameters.
+    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
+      vp10_cyclic_refresh_reset_resize(cpi);
+    // Get the projected qindex, based on the scaled target frame size (scaled
+    // so that target_bits_per_mb in vp10_rc_regulate_q will be the correct
+    // target).
+    target_bits_per_frame = (resize_now == 1)
+                                ? rc->this_frame_target * tot_scale_change
+                                : rc->this_frame_target / tot_scale_change;
+    active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
+    qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+                                active_worst_quality);
+    // If resize is down, check if the projected q index is close to
+    // worst_quality, and if so, reduce the rate correction factor (since we
+    // can likely afford a lower q for the resized frame).
+    if (resize_now == 1 && qindex > 90 * cpi->rc.worst_quality / 100) {
+      rc->rate_correction_factors[INTER_NORMAL] *= 0.85;
+    }
+    // If resize is back up, check if the projected q index is too far above
+    // the current base_qindex, and if so, reduce the rate correction factor
+    // (since we prefer to keep the q for the resized frame close to the
+    // previous q).
+    if (resize_now == -1 && qindex > 130 * cm->base_qindex / 100) {
+      rc->rate_correction_factors[INTER_NORMAL] *= 0.9;
+    }
+  }
+  return resize_now;
+}
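
On the scaling of the projected target above: for the 1/2 x 1/2 resize,
tot_scale_change = (2*2)/(1*1) = 4. The intent appears to be that, since the
MB count seen by vp10_rc_regulate_q is still the unscaled one at this point,
inflating the target by 4 on the way down (and deflating by 4 on the way
back up) makes the implied bits-per-mb match what the resized frame will
actually see.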
diff --git a/av1/encoder/ratectrl.h b/av1/encoder/ratectrl.h
new file mode 100644
index 0000000..88a14bc
--- /dev/null
+++ b/av1/encoder/ratectrl.h
@@ -0,0 +1,285 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_RATECTRL_H_
+#define VP10_ENCODER_RATECTRL_H_
+
+#include "aom/vpx_codec.h"
+#include "aom/vpx_integer.h"
+
+#include "av1/common/blockd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Bits Per MB at different Q (Multiplied by 512)
+#define BPER_MB_NORMBITS 9
+
+#define MIN_GF_INTERVAL 4
+#define MAX_GF_INTERVAL 16
+#define FIXED_GF_INTERVAL 8  // Used in some testing modes only
+
+#if CONFIG_EXT_REFS
+typedef enum {
+  INTER_NORMAL = 0,
+  INTER_LOW = 1,
+  INTER_HIGH = 2,
+  GF_ARF_LOW = 3,
+  GF_ARF_STD = 4,
+  KF_STD = 5,
+  RATE_FACTOR_LEVELS = 6
+} RATE_FACTOR_LEVEL;
+#else
+typedef enum {
+  INTER_NORMAL = 0,
+  INTER_HIGH = 1,
+  GF_ARF_LOW = 2,
+  GF_ARF_STD = 3,
+  KF_STD = 4,
+  RATE_FACTOR_LEVELS = 5
+} RATE_FACTOR_LEVEL;
+#endif  // CONFIG_EXT_REFS
+
+// Internal frame scaling level.
+typedef enum {
+  UNSCALED = 0,     // Frame is unscaled.
+  SCALE_STEP1 = 1,  // First-level down-scaling.
+  FRAME_SCALE_STEPS
+} FRAME_SCALE_LEVEL;
+
+// Frame dimensions multiplier wrt the native frame size, in 1/16ths,
+// specified for the scale-up case. A value N means the coded frame is 16/N
+// of the native size, e.g. 24 => 16/24 = 2/3 of native size. The restriction
+// to 1/16ths is intended to match the capabilities of the normative scaling
+// filters, giving precedence to the up-scaling accuracy.
+static const int frame_scale_factor[FRAME_SCALE_STEPS] = { 16, 24 };
+
+// Multiplier of the target rate to be used as threshold for triggering scaling.
+static const double rate_thresh_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
+
+// Scale dependent Rate Correction Factor multipliers. Compensates for the
+// greater number of bits per pixel generated in down-scaled frames.
+static const double rcf_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
+
+typedef struct {
+  // Rate targeting variables
+  int base_frame_target;  // A baseline frame target before adjustment
+                          // for previous under or over shoot.
+  int this_frame_target;  // Actual frame target after rc adjustment.
+  int projected_frame_size;
+  int sb64_target_rate;
+  int last_q[FRAME_TYPES];  // Separate values for Intra/Inter
+  int last_boosted_qindex;  // Last boosted GF/KF/ARF q
+  int last_kf_qindex;       // Q index of the last key frame coded.
+
+  int gfu_boost;
+  int last_boost;
+  int kf_boost;
+
+  double rate_correction_factors[RATE_FACTOR_LEVELS];
+
+  int frames_since_golden;
+  int frames_till_gf_update_due;
+  int min_gf_interval;
+  int max_gf_interval;
+  int static_scene_max_gf_interval;
+  int baseline_gf_interval;
+  int constrained_gf_group;
+  int frames_to_key;
+  int frames_since_key;
+  int this_key_frame_forced;
+  int next_key_frame_forced;
+  int source_alt_ref_pending;
+  int source_alt_ref_active;
+  int is_src_frame_alt_ref;
+
+#if CONFIG_EXT_REFS
+  // Length of the bi-predictive frame group interval
+  int bipred_group_interval;
+
+  // NOTE: Different frame types may be allocated different numbers of bits,
+  //       aiming to achieve the overall optimal RD performance.
+  int is_bwd_ref_frame;
+  int is_last_bipred_frame;
+  int is_bipred_frame;
+  int is_src_frame_ext_arf;
+#endif  // CONFIG_EXT_REFS
+
+  int avg_frame_bandwidth;  // Average frame size target for clip
+  int min_frame_bandwidth;  // Minimum allocation used for any frame
+  int max_frame_bandwidth;  // Maximum burst rate allowed for a frame.
+
+  int ni_av_qi;
+  int ni_tot_qi;
+  int ni_frames;
+  int avg_frame_qindex[FRAME_TYPES];
+  double tot_q;
+  double avg_q;
+
+  int64_t buffer_level;
+  int64_t bits_off_target;
+  int64_t vbr_bits_off_target;
+  int64_t vbr_bits_off_target_fast;
+
+  int decimation_factor;
+  int decimation_count;
+
+  int rolling_target_bits;
+  int rolling_actual_bits;
+
+  int long_rolling_target_bits;
+  int long_rolling_actual_bits;
+
+  int rate_error_estimate;
+
+  int64_t total_actual_bits;
+  int64_t total_target_bits;
+  int64_t total_target_vs_actual;
+
+  int worst_quality;
+  int best_quality;
+
+  int64_t starting_buffer_level;
+  int64_t optimal_buffer_level;
+  int64_t maximum_buffer_size;
+
+  // Rate control history for the last frame (1) and the frame before (2).
+  // -1: undershoot
+  //  1: overshoot
+  //  0: not initialized.
+  int rc_1_frame;
+  int rc_2_frame;
+  int q_1_frame;
+  int q_2_frame;
+
+  // Auto frame-scaling variables.
+  FRAME_SCALE_LEVEL frame_size_selector;
+  FRAME_SCALE_LEVEL next_frame_size_selector;
+  int frame_width[FRAME_SCALE_STEPS];
+  int frame_height[FRAME_SCALE_STEPS];
+  int rf_level_maxq[RATE_FACTOR_LEVELS];
+} RATE_CONTROL;
+
+struct VP10_COMP;
+struct VP10EncoderConfig;
+
+void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
+                  RATE_CONTROL *rc);
+
+int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+                            double correction_factor,
+                            vpx_bit_depth_t bit_depth);
+
+double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
+
+void vp10_rc_init_minq_luts(void);
+
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+                                        double framerate);
+// Note: vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
+// be passed in to ensure that the max_gf_interval returned is at least as big
+// as that.
+int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval);
+
+// Generally at the high level, the following flow is expected
+// to be enforced for rate control:
+// First call per frame, one of:
+//   vp10_rc_get_one_pass_vbr_params()
+//   vp10_rc_get_one_pass_cbr_params()
+//   vp10_rc_get_first_pass_params()
+//   vp10_rc_get_second_pass_params()
+// depending on the usage, to set the desired rate control parameters.
+//
+// Then, call encode_frame_to_data_rate() to perform the
+// actual encode. This function will in turn call encode_frame()
+// one or more times, followed by one of:
+//   vp10_rc_postencode_update()
+//   vp10_rc_postencode_update_drop_frame()
+//
+// The majority of rate control parameters are only expected
+// to be set in the vp10_rc_get_..._params() functions and
+// updated during the vp10_rc_postencode_update...() functions.
+// The only exceptions are vp10_rc_drop_frame() and
+// vp10_rc_update_rate_correction_factors() functions.
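
A hypothetical driver loop matching the flow described above (a sketch only;
frame_was_dropped and bytes_used are stand-ins for state the real encoder
tracks internally):

    vp10_rc_get_one_pass_cbr_params(cpi);   /* frame type, target, q range */
    /* ... encode_frame_to_data_rate() performs the actual encode ... */
    if (frame_was_dropped)                  /* hypothetical flag */
      vp10_rc_postencode_update_drop_frame(cpi);
    else
      vp10_rc_postencode_update(cpi, bytes_used);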
+
+// Functions to set parameters for encoding before the actual
+// encode_frame_to_data_rate() function.
+void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi);
+void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi);
+
+// Post encode update of the rate control parameters based
+// on bytes used
+void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used);
+// Post encode update of the rate control parameters for dropped frames
+void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi);
+
+// Updates rate correction factors
+// Changes only the rate correction factors in the rate control structure.
+void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi);
+
+// Decide if we should drop this frame: For 1-pass CBR.
+// Changes only the decimation count in the rate control structure
+int vp10_rc_drop_frame(struct VP10_COMP *cpi);
+
+// Computes frame size bounds.
+void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
+                                       int this_frame_target,
+                                       int *frame_under_shoot_limit,
+                                       int *frame_over_shoot_limit);
+
+// Picks q and q bounds given the target for bits
+int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
+                              int *top_index);
+
+// Estimates q to achieve a target bits per frame
+int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
+                       int active_best_quality, int active_worst_quality);
+
+// Estimates bits per mb for a given qindex and correction factor.
+int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+                        double correction_factor, vpx_bit_depth_t bit_depth);
+
+// Clamping utilities for bitrate targets for iframes and pframes.
+int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
+                                     int target);
+int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
+                                     int target);
+// Utility to set frame_target into the RATE_CONTROL structure
+// This function is called only from the vp10_rc_get_..._params() functions.
+void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
+
+// Computes a q delta (in "q index" terms) to get from a starting q value
+// to a target q value
+int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+                        vpx_bit_depth_t bit_depth);
+
+// Computes a q delta (in "q index" terms) to get from a starting q value
+// to a value that should equate to the given rate ratio.
+int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+                                int qindex, double rate_target_ratio,
+                                vpx_bit_depth_t bit_depth);
+
+int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
+
+void vp10_rc_update_framerate(struct VP10_COMP *cpi);
+
+void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
+                                   RATE_CONTROL *const rc);
+
+void vp10_set_target_rate(struct VP10_COMP *cpi);
+
+int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_RATECTRL_H_
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
new file mode 100644
index 0000000..a8a8691
--- /dev/null
+++ b/av1/encoder/rd.c
@@ -0,0 +1,1103 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vp10_rtcd.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/bitops.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+
+#include "av1/common/common.h"
+#include "av1/common/entropy.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/mvref_common.h"
+#include "av1/common/pred_common.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/cost.h"
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/tokenize.h"
+
+#define RD_THRESH_POW 1.25
+
+// Factor to weigh the rate for switchable interp filters.
+#define SWITCHABLE_INTERP_RATE_FACTOR 1
+
+void vp10_rd_cost_reset(RD_COST *rd_cost) {
+  rd_cost->rate = INT_MAX;
+  rd_cost->dist = INT64_MAX;
+  rd_cost->rdcost = INT64_MAX;
+}
+
+void vp10_rd_cost_init(RD_COST *rd_cost) {
+  rd_cost->rate = 0;
+  rd_cost->dist = 0;
+  rd_cost->rdcost = 0;
+}
+
+// The baseline rd thresholds for breaking out of the rd loop for
+// certain modes are assumed to be based on 8x8 blocks.
+// This table is used to correct for block size.
+// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
+static const uint8_t rd_thresh_block_size_factor[BLOCK_SIZES] = {
+  2,  3,  3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32,
+#if CONFIG_EXT_PARTITION
+  48, 48, 64
+#endif  // CONFIG_EXT_PARTITION
+};
+
+static void fill_mode_costs(VP10_COMP *cpi) {
+  const FRAME_CONTEXT *const fc = cpi->common.fc;
+  int i, j;
+
+  for (i = 0; i < INTRA_MODES; ++i)
+    for (j = 0; j < INTRA_MODES; ++j)
+      vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
+                       vp10_intra_mode_tree);
+
+  for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
+    vp10_cost_tokens(cpi->mbmode_cost[i], fc->y_mode_prob[i],
+                     vp10_intra_mode_tree);
+
+  for (i = 0; i < INTRA_MODES; ++i)
+    vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+                     vp10_intra_mode_tree);
+
+  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+    vp10_cost_tokens(cpi->switchable_interp_costs[i],
+                     fc->switchable_interp_prob[i],
+                     vp10_switchable_interp_tree);
+
+  for (i = 0; i < PALETTE_BLOCK_SIZES; ++i) {
+    vp10_cost_tokens(cpi->palette_y_size_cost[i],
+                     vp10_default_palette_y_size_prob[i],
+                     vp10_palette_size_tree);
+    vp10_cost_tokens(cpi->palette_uv_size_cost[i],
+                     vp10_default_palette_uv_size_prob[i],
+                     vp10_palette_size_tree);
+  }
+
+  for (i = 0; i < PALETTE_MAX_SIZE - 1; ++i)
+    for (j = 0; j < PALETTE_COLOR_CONTEXTS; ++j) {
+      vp10_cost_tokens(cpi->palette_y_color_cost[i][j],
+                       vp10_default_palette_y_color_prob[i][j],
+                       vp10_palette_color_tree[i]);
+      vp10_cost_tokens(cpi->palette_uv_color_cost[i][j],
+                       vp10_default_palette_uv_color_prob[i][j],
+                       vp10_palette_color_tree[i]);
+    }
+
+  for (i = 0; i < TX_SIZES - 1; ++i)
+    for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
+      vp10_cost_tokens(cpi->tx_size_cost[i][j], fc->tx_size_probs[i][j],
+                       vp10_tx_size_tree[i]);
+
+#if CONFIG_EXT_TX
+  for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+    int s;
+    for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
+      if (use_inter_ext_tx_for_txsize[s][i]) {
+        vp10_cost_tokens(cpi->inter_tx_type_costs[s][i],
+                         fc->inter_ext_tx_prob[s][i],
+                         vp10_ext_tx_inter_tree[s]);
+      }
+    }
+    for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
+      if (use_intra_ext_tx_for_txsize[s][i]) {
+        for (j = 0; j < INTRA_MODES; ++j)
+          vp10_cost_tokens(cpi->intra_tx_type_costs[s][i][j],
+                           fc->intra_ext_tx_prob[s][i][j],
+                           vp10_ext_tx_intra_tree[s]);
+      }
+    }
+  }
+#else
+  for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+    for (j = 0; j < TX_TYPES; ++j)
+      vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
+                       fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
+  }
+  for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
+    vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
+                     vp10_ext_tx_tree);
+  }
+#endif  // CONFIG_EXT_TX
+#if CONFIG_EXT_INTRA
+  for (i = 0; i < INTRA_FILTERS + 1; ++i)
+    vp10_cost_tokens(cpi->intra_filter_cost[i], fc->intra_filter_probs[i],
+                     vp10_intra_filter_tree);
+#endif  // CONFIG_EXT_INTRA
+}
+
+void vp10_fill_token_costs(vp10_coeff_cost *c,
+#if CONFIG_ANS
+                           coeff_cdf_model (*cdf)[PLANE_TYPES],
+#endif  // CONFIG_ANS
+                           vp10_coeff_probs_model (*p)[PLANE_TYPES]) {
+  int i, j, k, l;
+  TX_SIZE t;
+  for (t = TX_4X4; t <= TX_32X32; ++t)
+    for (i = 0; i < PLANE_TYPES; ++i)
+      for (j = 0; j < REF_TYPES; ++j)
+        for (k = 0; k < COEF_BANDS; ++k)
+          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+#if CONFIG_ANS
+            const vpx_prob *const tree_probs = p[t][i][j][k][l];
+            vp10_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
+                                 cdf[t][i][j][k][l], 0);
+            vp10_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
+                                 cdf[t][i][j][k][l], 1);
+#else
+            vpx_prob probs[ENTROPY_NODES];
+            vp10_model_to_full_probs(p[t][i][j][k][l], probs);
+            vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
+            vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+                                  vp10_coef_tree);
+#endif  // CONFIG_ANS
+            assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
+                   c[t][i][j][k][1][l][EOB_TOKEN]);
+          }
+}
+
+// Values are now correlated to quantizer.
+static int sad_per_bit16lut_8[QINDEX_RANGE];
+static int sad_per_bit4lut_8[QINDEX_RANGE];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static int sad_per_bit16lut_10[QINDEX_RANGE];
+static int sad_per_bit4lut_10[QINDEX_RANGE];
+static int sad_per_bit16lut_12[QINDEX_RANGE];
+static int sad_per_bit4lut_12[QINDEX_RANGE];
+#endif
+
+static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range,
+                            vpx_bit_depth_t bit_depth) {
+  int i;
+  // Initialize the sad lut tables using a formulaic calculation for now.
+  // This is to make it easier to resolve the impact of experimental changes
+  // to the quantizer tables.
+  for (i = 0; i < range; i++) {
+    const double q = vp10_convert_qindex_to_q(i, bit_depth);
+    bit16lut[i] = (int)(0.0418 * q + 2.4107);
+    bit4lut[i] = (int)(0.063 * q + 2.742);
+  }
+}
+
+void vp10_init_me_luts(void) {
+  init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
+                  VPX_BITS_8);
+#if CONFIG_VP9_HIGHBITDEPTH
+  init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
+                  VPX_BITS_10);
+  init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
+                  VPX_BITS_12);
+#endif
+}
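
Plugging numbers into the lut formulas above: at a q index where
vp10_convert_qindex_to_q() returns roughly 100 (an assumed value),

    /* sad_per_bit16lut_8[i] = (int)(0.0418 * 100 + 2.4107) = 6
     * sad_per_bit4lut_8[i]  = (int)(0.063  * 100 + 2.742)  = 9 */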
+
+static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
+                                         8,  8,  4,  4,  2,  2,  1,  0 };
+static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = {
+  128, 144, 128, 128, 144,
+#if CONFIG_EXT_REFS
+  // TODO(zoeliu): Adjust the following factor values further.
+  128, 128, 128,
+  // TODO(weitinglin): We should investigate whether the value should be the
+  //                   same as the value used by the OVERLAY frame.
+  144
+#endif  // CONFIG_EXT_REFS
+};
+
+int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
+  const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_VP9_HIGHBITDEPTH
+  int64_t rdmult = 0;
+  switch (cpi->common.bit_depth) {
+    case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
+    case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+    case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      return -1;
+  }
+#else
+  int64_t rdmult = 88 * q * q / 24;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
+    const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100));
+
+    rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
+    rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
+  }
+  if (rdmult < 1) rdmult = 1;
+  return (int)rdmult;
+}
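
A worked 8-bit example of the multiplier above (the DC quant step value is
an assumption):

    /* If vp10_dc_quant() yields q = 24:
     *   rdmult = 88 * 24 * 24 / 24 = 2112.
     * In a two-pass inter frame with gfu_boost = 400:
     *   boost_index = min(15, 400/100) = 4, rd_boost_factor[4] = 24,
     *   rdmult += (2112 * 24) >> 7 = 396, so rdmult = 2508
     * (a rd_frame_type_factor of 128 leaves rdmult unchanged: 128 >> 7 = x1). */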
+
+static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
+  double q;
+#if CONFIG_VP9_HIGHBITDEPTH
+  switch (bit_depth) {
+    case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+    case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+    case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      return -1;
+  }
+#else
+  (void)bit_depth;
+  q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  // TODO(debargha): Adjust the function below.
+  return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
+}
+
+void vp10_initialize_me_consts(const VP10_COMP *cpi, MACROBLOCK *x,
+                               int qindex) {
+#if CONFIG_VP9_HIGHBITDEPTH
+  switch (cpi->common.bit_depth) {
+    case VPX_BITS_8:
+      x->sadperbit16 = sad_per_bit16lut_8[qindex];
+      x->sadperbit4 = sad_per_bit4lut_8[qindex];
+      break;
+    case VPX_BITS_10:
+      x->sadperbit16 = sad_per_bit16lut_10[qindex];
+      x->sadperbit4 = sad_per_bit4lut_10[qindex];
+      break;
+    case VPX_BITS_12:
+      x->sadperbit16 = sad_per_bit16lut_12[qindex];
+      x->sadperbit4 = sad_per_bit4lut_12[qindex];
+      break;
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+  }
+#else
+  (void)cpi;
+  x->sadperbit16 = sad_per_bit16lut_8[qindex];
+  x->sadperbit4 = sad_per_bit4lut_8[qindex];
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+}
+
+static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
+  int i, bsize, segment_id;
+
+  for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
+    const int qindex =
+        clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+                  cm->y_dc_delta_q,
+              0, MAXQ);
+    const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
+
+    for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
+      // Threshold here seems unnecessarily harsh but fine given actual
+      // range of values used for cpi->sf.thresh_mult[].
+      const int t = q * rd_thresh_block_size_factor[bsize];
+      const int thresh_max = INT_MAX / t;
+
+      if (bsize >= BLOCK_8X8) {
+        for (i = 0; i < MAX_MODES; ++i)
+          rd->threshes[segment_id][bsize][i] = rd->thresh_mult[i] < thresh_max
+                                                   ? rd->thresh_mult[i] * t / 4
+                                                   : INT_MAX;
+      } else {
+        for (i = 0; i < MAX_REFS; ++i)
+          rd->threshes[segment_id][bsize][i] =
+              rd->thresh_mult_sub8x8[i] < thresh_max
+                  ? rd->thresh_mult_sub8x8[i] * t / 4
+                  : INT_MAX;
+      }
+    }
+  }
+}
+
+#if CONFIG_REF_MV
+void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
+  MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
+  int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[ref_frame],
+                             mbmi_ext->ref_mv_stack[ref_frame]);
+  x->mvcost = x->mv_cost_stack[nmv_ctx];
+  x->nmvjointcost = x->nmv_vec_cost[nmv_ctx];
+  x->mvsadcost = x->mvcost;
+  x->nmvjointsadcost = x->nmvjointcost;
+
+  x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] =
+      x->zero_rmv_cost[nmv_ctx][1] - x->zero_rmv_cost[nmv_ctx][0];
+}
+#endif
+
+void vp10_initialize_rd_consts(VP10_COMP *cpi) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->td.mb;
+  RD_OPT *const rd = &cpi->rd;
+  int i;
+
+  vpx_clear_system_state();
+
+  rd->RDDIV = RDDIV_BITS;  // In bits (to multiply D by 128).
+  rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+
+  set_error_per_bit(x, rd->RDMULT);
+
+  set_block_thresholds(cm, rd);
+
+  if (!frame_is_intra_only(cm)) {
+#if CONFIG_REF_MV
+    int nmv_ctx;
+
+    for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
+      vpx_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
+      cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = 1;
+
+      vp10_build_nmv_cost_table(
+          x->nmv_vec_cost[nmv_ctx],
+          cm->allow_high_precision_mv ? x->nmvcost_hp[nmv_ctx]
+                                      : x->nmvcost[nmv_ctx],
+          &cm->fc->nmvc[nmv_ctx], cm->allow_high_precision_mv);
+      cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = tmp_prob;
+
+      x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] = 0;
+      x->zero_rmv_cost[nmv_ctx][0] =
+          vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
+      x->zero_rmv_cost[nmv_ctx][1] =
+          vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
+    }
+    x->mvcost = x->mv_cost_stack[0];
+    x->nmvjointcost = x->nmv_vec_cost[0];
+    x->mvsadcost = x->mvcost;
+    x->nmvjointsadcost = x->nmvjointcost;
+#else
+    vp10_build_nmv_cost_table(
+        x->nmvjointcost,
+        cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
+        cm->allow_high_precision_mv);
+#endif
+  }
+  if (cpi->oxcf.pass != 1) {
+    vp10_fill_token_costs(x->token_costs,
+#if CONFIG_ANS
+                          cm->fc->coef_cdfs,
+#endif  // CONFIG_ANS
+                          cm->fc->coef_probs);
+
+    if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
+        cm->frame_type == KEY_FRAME) {
+#if CONFIG_EXT_PARTITION_TYPES
+      vp10_cost_tokens(cpi->partition_cost[0], cm->fc->partition_prob[0],
+                       vp10_partition_tree);
+      for (i = 1; i < PARTITION_CONTEXTS; ++i)
+        vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+                         vp10_ext_partition_tree);
+#else
+      for (i = 0; i < PARTITION_CONTEXTS; ++i)
+        vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+                         vp10_partition_tree);
+#endif  // CONFIG_EXT_PARTITION_TYPES
+    }
+
+    fill_mode_costs(cpi);
+
+    if (!frame_is_intra_only(cm)) {
+#if CONFIG_REF_MV
+      for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) {
+        cpi->newmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->newmv_prob[i], 0);
+        cpi->newmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->newmv_prob[i], 1);
+      }
+
+      for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i) {
+        cpi->zeromv_mode_cost[i][0] = vp10_cost_bit(cm->fc->zeromv_prob[i], 0);
+        cpi->zeromv_mode_cost[i][1] = vp10_cost_bit(cm->fc->zeromv_prob[i], 1);
+      }
+
+      for (i = 0; i < REFMV_MODE_CONTEXTS; ++i) {
+        cpi->refmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->refmv_prob[i], 0);
+        cpi->refmv_mode_cost[i][1] = vp10_cost_bit(cm->fc->refmv_prob[i], 1);
+      }
+
+      for (i = 0; i < DRL_MODE_CONTEXTS; ++i) {
+        cpi->drl_mode_cost0[i][0] = vp10_cost_bit(cm->fc->drl_prob[i], 0);
+        cpi->drl_mode_cost0[i][1] = vp10_cost_bit(cm->fc->drl_prob[i], 1);
+      }
+#if CONFIG_EXT_INTER
+      cpi->new2mv_mode_cost[0] = vp10_cost_bit(cm->fc->new2mv_prob, 0);
+      cpi->new2mv_mode_cost[1] = vp10_cost_bit(cm->fc->new2mv_prob, 1);
+#endif  // CONFIG_EXT_INTER
+#else
+      for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+        vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
+                         cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+#endif  // CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+      for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+        vp10_cost_tokens((int *)cpi->inter_compound_mode_cost[i],
+                         cm->fc->inter_compound_mode_probs[i],
+                         vp10_inter_compound_mode_tree);
+      for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
+        vp10_cost_tokens((int *)cpi->interintra_mode_cost[i],
+                         cm->fc->interintra_mode_prob[i],
+                         vp10_interintra_mode_tree);
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+      for (i = BLOCK_8X8; i < BLOCK_SIZES; i++) {
+        vp10_cost_tokens((int *)cpi->motvar_cost[i], cm->fc->motvar_prob[i],
+                         vp10_motvar_tree);
+      }
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+    }
+  }
+}
+
+static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
+  // NOTE: The tables below must be of the same size.
+
+  // The functions described below are sampled at the four most significant
+  // bits of (x^2 + 8/256).
+
+  // Normalized rate:
+  // This table models the rate for a Laplacian source with given variance
+  // when quantized with a uniform quantizer with given stepsize. The
+  // closed form expression is:
+  // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)],
+  // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
+  // and H(x) is the binary entropy function.
+  static const int rate_tab_q10[] = {
+    65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, 4553, 4389, 4255, 4142,
+    4044,  3958, 3881, 3811, 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
+    3133,  3037, 2952, 2877, 2809, 2747, 2690, 2638, 2589, 2501, 2423, 2353,
+    2290,  2232, 2179, 2130, 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
+    1608,  1530, 1460, 1398, 1342, 1290, 1243, 1199, 1159, 1086, 1021, 963,
+    911,   864,  821,  781,  745,  680,  623,  574,  530,  490,  455,  424,
+    395,   345,  304,  269,  239,  213,  190,  171,  154,  126,  104,  87,
+    73,    61,   52,   44,   38,   28,   21,   16,   12,   10,   8,    6,
+    5,     3,    2,    1,    1,    1,    0,    0,
+  };
+  // Normalized distortion:
+  // This table models the normalized distortion for a Laplacian source
+  // with given variance when quantized with a uniform quantizer
+  // with given stepsize. The closed form expression is:
+  // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2))
+  // where x = qpstep / sqrt(variance).
+  // Note the actual distortion is Dn * variance.
+  static const int dist_tab_q10[] = {
+    0,    0,    1,    1,    1,    2,    2,    2,    3,    3,    4,    5,
+    5,    6,    7,    7,    8,    9,    11,   12,   13,   15,   16,   17,
+    18,   21,   24,   26,   29,   31,   34,   36,   39,   44,   49,   54,
+    59,   64,   69,   73,   78,   88,   97,   106,  115,  124,  133,  142,
+    151,  167,  184,  200,  215,  231,  245,  260,  274,  301,  327,  351,
+    375,  397,  418,  439,  458,  495,  528,  559,  587,  613,  637,  659,
+    680,  717,  749,  777,  801,  823,  842,  859,  874,  899,  919,  936,
+    949,  960,  969,  977,  983,  994,  1001, 1006, 1010, 1013, 1015, 1017,
+    1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
+  };
+  static const int xsq_iq_q10[] = {
+    0,      4,      8,      12,     16,     20,     24,     28,     32,
+    40,     48,     56,     64,     72,     80,     88,     96,     112,
+    128,    144,    160,    176,    192,    208,    224,    256,    288,
+    320,    352,    384,    416,    448,    480,    544,    608,    672,
+    736,    800,    864,    928,    992,    1120,   1248,   1376,   1504,
+    1632,   1760,   1888,   2016,   2272,   2528,   2784,   3040,   3296,
+    3552,   3808,   4064,   4576,   5088,   5600,   6112,   6624,   7136,
+    7648,   8160,   9184,   10208,  11232,  12256,  13280,  14304,  15328,
+    16352,  18400,  20448,  22496,  24544,  26592,  28640,  30688,  32736,
+    36832,  40928,  45024,  49120,  53216,  57312,  61408,  65504,  73696,
+    81888,  90080,  98272,  106464, 114656, 122848, 131040, 147424, 163808,
+    180192, 196576, 212960, 229344, 245728,
+  };
+  const int tmp = (xsq_q10 >> 2) + 8;
+  const int k = get_msb(tmp) - 3;
+  const int xq = (k << 3) + ((tmp >> k) & 0x7);
+  const int one_q10 = 1 << 10;
+  const int a_q10 = ((xsq_q10 - xsq_iq_q10[xq]) << 10) >> (2 + k);
+  const int b_q10 = one_q10 - a_q10;
+  *r_q10 = (rate_tab_q10[xq] * b_q10 + rate_tab_q10[xq + 1] * a_q10) >> 10;
+  *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
+}
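
The index math above picks the entry of the xsq_iq_q10[] grid just below
xsq_q10 and linearly interpolates to the next one. A worked example for
xsq_q10 = 1024 (i.e. x^2 = 1.0):

    /* tmp = (1024 >> 2) + 8 = 264; get_msb(264) = 8, so k = 5.
     * xq  = (5 << 3) + ((264 >> 5) & 7) = 40;  xsq_iq_q10[40] = 992.
     * a_q10 = ((1024 - 992) << 10) >> 7 = 256, i.e. 1/4 of the way from
     * sample 40 to sample 41 (992 + 0.25 * (1120 - 992) = 1024, as
     * expected), so rate and distortion blend table entries 40 and 41
     * with weights 768 and 256. */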
+
+void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
+                                   unsigned int qstep, int *rate,
+                                   int64_t *dist) {
+  // This function models the rate and distortion for a Laplacian
+  // source with given variance when quantized with a uniform quantizer
+  // with given stepsize. The closed form expressions are in:
+  // Hang and Chen, "Source Model for transform video coder and its
+  // application - Part I: Fundamental Theory", IEEE Trans. Circ.
+  // Sys. for Video Tech., April 1997.
+  if (var == 0) {
+    *rate = 0;
+    *dist = 0;
+  } else {
+    int d_q10, r_q10;
+    static const uint32_t MAX_XSQ_Q10 = 245727;
+    const uint64_t xsq_q10_64 =
+        (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var;
+    const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10);
+    model_rd_norm(xsq_q10, &r_q10, &d_q10);
+    *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - VP10_PROB_COST_SHIFT);
+    *dist = (var * (int64_t)d_q10 + 512) >> 10;
+  }
+}
+
+static void get_entropy_contexts_plane(
+    BLOCK_SIZE plane_bsize, TX_SIZE tx_size, const struct macroblockd_plane *pd,
+    ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+    ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
+  const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+  const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+  const ENTROPY_CONTEXT *const above = pd->above_context;
+  const ENTROPY_CONTEXT *const left = pd->left_context;
+
+  int i;
+  switch (tx_size) {
+    case TX_4X4:
+      memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+      memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+      break;
+    case TX_8X8:
+      for (i = 0; i < num_4x4_w; i += 2)
+        t_above[i] = !!*(const uint16_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 2)
+        t_left[i] = !!*(const uint16_t *)&left[i];
+      break;
+    case TX_16X16:
+      for (i = 0; i < num_4x4_w; i += 4)
+        t_above[i] = !!*(const uint32_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 4)
+        t_left[i] = !!*(const uint32_t *)&left[i];
+      break;
+    case TX_32X32:
+      for (i = 0; i < num_4x4_w; i += 8)
+        t_above[i] = !!*(const uint64_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 8)
+        t_left[i] = !!*(const uint64_t *)&left[i];
+      break;
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    case TX_4X8:
+      memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+      for (i = 0; i < num_4x4_h; i += 2)
+        t_left[i] = !!*(const uint16_t *)&left[i];
+      break;
+    case TX_8X4:
+      for (i = 0; i < num_4x4_w; i += 2)
+        t_above[i] = !!*(const uint16_t *)&above[i];
+      memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+      break;
+    case TX_8X16:
+      for (i = 0; i < num_4x4_w; i += 2)
+        t_above[i] = !!*(const uint16_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 4)
+        t_left[i] = !!*(const uint32_t *)&left[i];
+      break;
+    case TX_16X8:
+      for (i = 0; i < num_4x4_w; i += 4)
+        t_above[i] = !!*(const uint32_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 2)
+        t_left[i] = !!*(const uint16_t *)&left[i];
+      break;
+    case TX_16X32:
+      for (i = 0; i < num_4x4_w; i += 4)
+        t_above[i] = !!*(const uint32_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 8)
+        t_left[i] = !!*(const uint64_t *)&left[i];
+      break;
+    case TX_32X16:
+      for (i = 0; i < num_4x4_w; i += 8)
+        t_above[i] = !!*(const uint64_t *)&above[i];
+      for (i = 0; i < num_4x4_h; i += 4)
+        t_left[i] = !!*(const uint32_t *)&left[i];
+      break;
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+    default: assert(0 && "Invalid transform size."); break;
+  }
+}
+
+void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+                               const struct macroblockd_plane *pd,
+                               ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+                               ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]) {
+  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+  get_entropy_contexts_plane(plane_bsize, tx_size, pd, t_above, t_left);
+}
+
+void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
+  int i;
+  int zero_seen = 0;
+  int best_index = 0;
+  int best_sad = INT_MAX;
+  int this_sad = INT_MAX;
+  int max_mv = 0;
+  int near_same_nearest;
+  uint8_t *src_y_ptr = x->plane[0].src.buf;
+  uint8_t *ref_y_ptr;
+  const int num_mv_refs =
+      MAX_MV_REF_CANDIDATES +
+      (cpi->sf.adaptive_motion_search && block_size < x->max_partition_size);
+
+  MV pred_mv[3];
+  pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv;
+  pred_mv[1] = x->mbmi_ext->ref_mvs[ref_frame][1].as_mv;
+  pred_mv[2] = x->pred_mv[ref_frame];
+  assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0])));
+
+  near_same_nearest = x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
+                      x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
+  // Get the sad for each candidate reference mv.
+  for (i = 0; i < num_mv_refs; ++i) {
+    const MV *this_mv = &pred_mv[i];
+    int fp_row, fp_col;
+
+    if (i == 1 && near_same_nearest) continue;
+    fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
+    fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
+    max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
+
+    if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
+    zero_seen |= (fp_row == 0 && fp_col == 0);
+
+    ref_y_ptr = &ref_y_buffer[ref_y_stride * fp_row + fp_col];
+    // Find sad for current vector.
+    this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
+                                           ref_y_ptr, ref_y_stride);
+    // Note if it is the best so far.
+    if (this_sad < best_sad) {
+      best_sad = this_sad;
+      best_index = i;
+    }
+  }
+
+  // Note the index of the mv that worked best in the reference list.
+  x->mv_best_ref_index[ref_frame] = best_index;
+  x->max_mv_context[ref_frame] = max_mv;
+  x->pred_mv_sad[ref_frame] = best_sad;
+}
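
On the full-pel rounding used above: MV components are in 1/8-pel units, and
(v + 3 + (v >= 0)) >> 3 rounds to the nearest full pel; the asymmetric bias
compensates for the arithmetic shift rounding toward minus infinity:

    /* v =  5 (0.625 pel):  (5 + 3 + 1) >> 3 =  1
     * v = -5 (-0.625 pel): (-5 + 3)    >> 3 = -1
     * v = -3 (-0.375 pel): (-3 + 3)    >> 3 =  0 */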
+
+void vp10_setup_pred_block(const MACROBLOCKD *xd,
+                           struct buf_2d dst[MAX_MB_PLANE],
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *scale,
+                           const struct scale_factors *scale_uv) {
+  int i;
+
+  dst[0].buf = src->y_buffer;
+  dst[0].stride = src->y_stride;
+  dst[1].buf = src->u_buffer;
+  dst[2].buf = src->v_buffer;
+  dst[1].stride = dst[2].stride = src->uv_stride;
+
+  for (i = 0; i < MAX_MB_PLANE; ++i) {
+    setup_pred_plane(dst + i, dst[i].buf,
+                     i ? src->uv_crop_width : src->y_crop_width,
+                     i ? src->uv_crop_height : src->y_crop_height,
+                     dst[i].stride, mi_row, mi_col, i ? scale_uv : scale,
+                     xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+  }
+}
+
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                             int stride) {
+  const int bw = b_width_log2_lookup[plane_bsize];
+  const int y = 4 * (raster_block >> bw);
+  const int x = 4 * (raster_block & ((1 << bw) - 1));
+  return y * stride + x;
+}
+
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+                                        int raster_block, int16_t *base) {
+  const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
+}
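
A worked example of the raster-offset arithmetic above, assuming a 16x16
plane block (so b_width_log2_lookup[] gives bw = 2, i.e. four 4x4 columns):

    /* raster_block = 6 (row 1, col 2 of the 4x4 grid):
     *   y = 4 * (6 >> 2)           = 4
     *   x = 4 * (6 & ((1<<2) - 1)) = 8
     * offset = 4 * stride + 8. */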
+
+YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
+                                              int ref_frame) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
+  const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
+  return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
+             ? &cm->buffer_pool->frame_bufs[scaled_idx].buf
+             : NULL;
+}
+
+#if CONFIG_DUAL_FILTER
+int vp10_get_switchable_rate(const VP10_COMP *cpi,
+                             const MACROBLOCKD *const xd) {
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int inter_filter_cost = 0;
+  int dir;
+
+  for (dir = 0; dir < 2; ++dir) {
+    if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
+        (mbmi->ref_frame[1] > INTRA_FRAME &&
+         has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
+      const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+      inter_filter_cost +=
+          cpi->switchable_interp_costs[ctx][mbmi->interp_filter[dir]];
+    }
+  }
+  return SWITCHABLE_INTERP_RATE_FACTOR * inter_filter_cost;
+}
+#else
+int vp10_get_switchable_rate(const VP10_COMP *cpi,
+                             const MACROBLOCKD *const xd) {
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const int ctx = vp10_get_pred_context_switchable_interp(xd);
+#if CONFIG_EXT_INTERP
+  if (!vp10_is_interp_needed(xd)) return 0;
+#endif  // CONFIG_EXT_INTERP
+  return SWITCHABLE_INTERP_RATE_FACTOR *
+         cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
+}
+#endif
+
+void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
+  int i;
+  RD_OPT *const rd = &cpi->rd;
+  SPEED_FEATURES *const sf = &cpi->sf;
+
+  // Set baseline threshold values.
+  for (i = 0; i < MAX_MODES; ++i)
+    rd->thresh_mult[i] = cpi->oxcf.mode == BEST ? -500 : 0;
+
+  if (sf->adaptive_rd_thresh) {
+    rd->thresh_mult[THR_NEARESTMV] = 300;
+#if CONFIG_EXT_REFS
+    rd->thresh_mult[THR_NEARESTL2] = 300;
+    rd->thresh_mult[THR_NEARESTL3] = 300;
+    rd->thresh_mult[THR_NEARESTB] = 300;
+#endif  // CONFIG_EXT_REFS
+    rd->thresh_mult[THR_NEARESTA] = 300;
+    rd->thresh_mult[THR_NEARESTG] = 300;
+  } else {
+    rd->thresh_mult[THR_NEARESTMV] = 0;
+#if CONFIG_EXT_REFS
+    rd->thresh_mult[THR_NEARESTL2] = 0;
+    rd->thresh_mult[THR_NEARESTL3] = 0;
+    rd->thresh_mult[THR_NEARESTB] = 0;
+#endif  // CONFIG_EXT_REFS
+    rd->thresh_mult[THR_NEARESTA] = 0;
+    rd->thresh_mult[THR_NEARESTG] = 0;
+  }
+
+  rd->thresh_mult[THR_DC] += 1000;
+
+  rd->thresh_mult[THR_NEWMV] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEWL2] += 1000;
+  rd->thresh_mult[THR_NEWL3] += 1000;
+  rd->thresh_mult[THR_NEWB] += 1000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEWA] += 1000;
+  rd->thresh_mult[THR_NEWG] += 1000;
+
+  rd->thresh_mult[THR_NEARMV] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEARL2] += 1000;
+  rd->thresh_mult[THR_NEARL3] += 1000;
+  rd->thresh_mult[THR_NEARB] += 1000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEARA] += 1000;
+  rd->thresh_mult[THR_NEARG] += 1000;
+
+#if CONFIG_EXT_INTER
+  rd->thresh_mult[THR_NEWFROMNEARMV] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEWFROMNEARL2] += 1000;
+  rd->thresh_mult[THR_NEWFROMNEARL3] += 1000;
+  rd->thresh_mult[THR_NEWFROMNEARB] += 1000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_NEWFROMNEARA] += 1000;
+  rd->thresh_mult[THR_NEWFROMNEARG] += 1000;
+#endif  // CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_ZEROMV] += 2000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_ZEROL2] += 2000;
+  rd->thresh_mult[THR_ZEROL3] += 2000;
+  rd->thresh_mult[THR_ZEROB] += 2000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_ZEROG] += 2000;
+  rd->thresh_mult[THR_ZEROA] += 2000;
+
+  rd->thresh_mult[THR_TM] += 1000;
+
+#if CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTLA] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTL2A] += 1000;
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTL3A] += 1000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTGA] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTLB] += 1000;
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTL2B] += 1000;
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTL3B] += 1000;
+  rd->thresh_mult[THR_COMP_NEAREST_NEARESTGB] += 1000;
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_COMP_NEARESTLA] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARESTL2A] += 1000;
+  rd->thresh_mult[THR_COMP_NEARESTL3A] += 1000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARESTGA] += 1000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARESTLB] += 1000;
+  rd->thresh_mult[THR_COMP_NEARESTL2B] += 1000;
+  rd->thresh_mult[THR_COMP_NEARESTL3B] += 1000;
+  rd->thresh_mult[THR_COMP_NEARESTGB] += 1000;
+#endif  // CONFIG_EXT_REFS
+
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARLA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTLA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARLA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWLA] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTLA] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWLA] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARLA] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWLA] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROLA] += 2500;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEAREST_NEARL2A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTL2A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARL2A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWL2A] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTL2A] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWL2A] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARL2A] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWL2A] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROL2A] += 2500;
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARL3A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTL3A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARL3A] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWL3A] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTL3A] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWL3A] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARL3A] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWL3A] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROL3A] += 2500;
+#endif  // CONFIG_EXT_REFS
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARGA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTGA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARGA] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWGA] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTGA] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWGA] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARGA] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWGA] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROGA] += 2500;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEAREST_NEARLB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTLB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARLB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWLB] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTLB] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWLB] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARLB] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWLB] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROLB] += 2500;
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARL2B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTL2B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARL2B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWL2B] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTL2B] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWL2B] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARL2B] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWL2B] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROL2B] += 2500;
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARL3B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTL3B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARL3B] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWL3B] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTL3B] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWL3B] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARL3B] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWL3B] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROL3B] += 2500;
+
+  rd->thresh_mult[THR_COMP_NEAREST_NEARGB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARESTGB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAR_NEARGB] += 1200;
+  rd->thresh_mult[THR_COMP_NEAREST_NEWGB] += 1500;
+  rd->thresh_mult[THR_COMP_NEW_NEARESTGB] += 1500;
+  rd->thresh_mult[THR_COMP_NEAR_NEWGB] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEARGB] += 1700;
+  rd->thresh_mult[THR_COMP_NEW_NEWGB] += 2000;
+  rd->thresh_mult[THR_COMP_ZERO_ZEROGB] += 2500;
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_COMP_NEARLA] += 1500;
+  rd->thresh_mult[THR_COMP_NEWLA] += 2000;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARL2A] += 1500;
+  rd->thresh_mult[THR_COMP_NEWL2A] += 2000;
+  rd->thresh_mult[THR_COMP_NEARL3A] += 1500;
+  rd->thresh_mult[THR_COMP_NEWL3A] += 2000;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARGA] += 1500;
+  rd->thresh_mult[THR_COMP_NEWGA] += 2000;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_NEARLB] += 1500;
+  rd->thresh_mult[THR_COMP_NEWLB] += 2000;
+  rd->thresh_mult[THR_COMP_NEARL2B] += 1500;
+  rd->thresh_mult[THR_COMP_NEWL2B] += 2000;
+  rd->thresh_mult[THR_COMP_NEARL3B] += 1500;
+  rd->thresh_mult[THR_COMP_NEWL3B] += 2000;
+  rd->thresh_mult[THR_COMP_NEARGB] += 1500;
+  rd->thresh_mult[THR_COMP_NEWGB] += 2000;
+#endif  // CONFIG_EXT_REFS
+
+  rd->thresh_mult[THR_COMP_ZEROLA] += 2500;
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_ZEROL2A] += 2500;
+  rd->thresh_mult[THR_COMP_ZEROL3A] += 2500;
+#endif  // CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_ZEROGA] += 2500;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_ZEROLB] += 2500;
+  rd->thresh_mult[THR_COMP_ZEROL2B] += 2500;
+  rd->thresh_mult[THR_COMP_ZEROL3B] += 2500;
+  rd->thresh_mult[THR_COMP_ZEROGB] += 2500;
+#endif  // CONFIG_EXT_REFS
+
+#endif  // CONFIG_EXT_INTER
+
+  rd->thresh_mult[THR_H_PRED] += 2000;
+  rd->thresh_mult[THR_V_PRED] += 2000;
+  rd->thresh_mult[THR_D135_PRED] += 2500;
+  rd->thresh_mult[THR_D207_PRED] += 2500;
+  rd->thresh_mult[THR_D153_PRED] += 2500;
+  rd->thresh_mult[THR_D63_PRED] += 2500;
+  rd->thresh_mult[THR_D117_PRED] += 2500;
+  rd->thresh_mult[THR_D45_PRED] += 2500;
+
+#if CONFIG_EXT_INTER
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARL] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWL] += 2000;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL2] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL2] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARL2] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWL2] += 2000;
+
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL3] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL3] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARL3] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWL3] += 2000;
+#endif  // CONFIG_EXT_REFS
+
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROG] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTG] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARG] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWG] += 2000;
+
+#if CONFIG_EXT_REFS
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROB] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTB] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARB] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWB] += 2000;
+#endif  // CONFIG_EXT_REFS
+
+  rd->thresh_mult[THR_COMP_INTERINTRA_ZEROA] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTA] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEARA] += 1500;
+  rd->thresh_mult[THR_COMP_INTERINTRA_NEWA] += 2000;
+#endif  // CONFIG_EXT_INTER
+}
+
+void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
+  static const int thresh_mult[2][MAX_REFS] = {
+#if CONFIG_EXT_REFS
+    { 2500, 2500, 2500, 2500, 2500, 2500, 4500, 4500, 4500, 4500, 4500, 4500,
+      4500, 4500, 2500 },
+    { 2000, 2000, 2000, 2000, 2000, 2000, 4000, 4000, 4000, 4000, 4000, 4000,
+      4000, 4000, 2000 }
+#else
+    { 2500, 2500, 2500, 4500, 4500, 2500 },
+    { 2000, 2000, 2000, 4000, 4000, 2000 }
+#endif  // CONFIG_EXT_REFS
+  };
+  RD_OPT *const rd = &cpi->rd;
+  const int idx = cpi->oxcf.mode == BEST;
+  memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
+}
+
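+// Adaptive RD threshold update: the winning mode's factor decays by 1/16 of
+// its current value (*fact -= *fact >> 4), so repeatedly chosen modes become
+// cheaper to re-test, while every other mode's factor grows by RD_THRESH_INC
+// up to a cap of rd_thresh * RD_THRESH_MAX_FACT. The update also touches the
+// neighboring block sizes (bsize - 1 through bsize + 2, clamped).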
+void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
+                                int (*factor_buf)[MAX_MODES], int rd_thresh,
+                                int bsize, int best_mode_index) {
+  if (rd_thresh > 0) {
+    const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
+    int mode;
+    for (mode = 0; mode < top_mode; ++mode) {
+      const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4);
+      const BLOCK_SIZE max_size = VPXMIN(bsize + 2, cm->sb_size);
+      BLOCK_SIZE bs;
+      for (bs = min_size; bs <= max_size; ++bs) {
+        int *const fact = &factor_buf[bs][mode];
+        if (mode == best_mode_index) {
+          *fact -= (*fact >> 4);
+        } else {
+          *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT);
+        }
+      }
+    }
+  }
+}
+
+int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+                                vpx_bit_depth_t bit_depth) {
+  const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
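+  // The multipliers below keep the penalty roughly constant across bit
+  // depths: q scales by about 4x at 10-bit and 16x at 12-bit relative to
+  // 8-bit, so 5 * q at 10-bit and (5 * q) / 4 at 12-bit both correspond to
+  // roughly 20 * q at the 8-bit scale.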
+#if CONFIG_VP9_HIGHBITDEPTH
+  switch (bit_depth) {
+    case VPX_BITS_8: return 20 * q;
+    case VPX_BITS_10: return 5 * q;
+    case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
+    default:
+      assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+      return -1;
+  }
+#else
+  return 20 * q;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+}
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
new file mode 100644
index 0000000..9680215
--- /dev/null
+++ b/av1/encoder/rd.h
@@ -0,0 +1,464 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_RD_H_
+#define VP10_ENCODER_RD_H_
+
+#include <limits.h>
+
+#if CONFIG_ANS
+#include "av1/common/ans.h"
+#endif  // CONFIG_ANS
+#include "av1/common/blockd.h"
+
+#include "av1/encoder/block.h"
+#include "av1/encoder/context_tree.h"
+#include "av1/encoder/cost.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RDDIV_BITS 7
+#define RD_EPB_SHIFT 6
+
+#define RDCOST(RM, DM, R, D)                                          \
+  (ROUND_POWER_OF_TWO(((int64_t)(R)) * (RM), VP10_PROB_COST_SHIFT) +  \
+   ((D) << (DM)))
+
+#define RDCOST_DBL(RM, DM, R, D)                                    \
+  (((((double)(R)) * (RM)) / (double)(1 << VP10_PROB_COST_SHIFT)) + \
+   ((double)(D) * (1 << (DM))))
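+// Both macros fold rate and distortion into a single cost. The rate R
+// carries the extra 2^VP10_PROB_COST_SHIFT scale of the bit-cost tables,
+// which is removed (with rounding in the integer version) after weighting
+// by the Lagrangian multiplier RM; the distortion D is scaled by 2^DM so
+// the two terms are comparable. RDCOST_DBL is the floating-point variant
+// for call sites where precision matters more than speed.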
+
+#define QIDX_SKIP_THRESH 115
+
+#define MV_COST_WEIGHT 108
+#define MV_COST_WEIGHT_SUB 120
+
+#define INVALID_MV 0x80008000
+
+#if CONFIG_EXT_REFS
+
+#if CONFIG_EXT_INTER
+#define MAX_MODES 144
+#else  // CONFIG_EXT_INTER
+#define MAX_MODES 66
+#endif  // CONFIG_EXT_INTER
+
+#else  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_INTER
+#define MAX_MODES 57
+#else  // CONFIG_EXT_INTER
+#define MAX_MODES 30
+#endif  // CONFIG_EXT_INTER
+
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+#define MAX_REFS 15
+#else
+#define MAX_REFS 6
+#endif  // CONFIG_EXT_REFS
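+// Note: MAX_MODES and MAX_REFS must equal the number of entries in
+// vp10_mode_order[] and vp10_ref_order[] respectively (see
+// av1/encoder/rdopt.c).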
+
+#define RD_THRESH_MAX_FACT 64
+#define RD_THRESH_INC 1
+
+// This enumerator type needs to be kept aligned with the mode order in
+// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
+typedef enum {
+  THR_NEARESTMV,
+#if CONFIG_EXT_REFS
+  THR_NEARESTL2,
+  THR_NEARESTL3,
+  THR_NEARESTB,
+#endif  // CONFIG_EXT_REFS
+  THR_NEARESTA,
+  THR_NEARESTG,
+
+  THR_DC,
+
+  THR_NEWMV,
+#if CONFIG_EXT_REFS
+  THR_NEWL2,
+  THR_NEWL3,
+  THR_NEWB,
+#endif  // CONFIG_EXT_REFS
+  THR_NEWA,
+  THR_NEWG,
+
+  THR_NEARMV,
+#if CONFIG_EXT_REFS
+  THR_NEARL2,
+  THR_NEARL3,
+  THR_NEARB,
+#endif  // CONFIG_EXT_REFS
+  THR_NEARA,
+  THR_NEARG,
+
+#if CONFIG_EXT_INTER
+  THR_NEWFROMNEARMV,
+#if CONFIG_EXT_REFS
+  THR_NEWFROMNEARL2,
+  THR_NEWFROMNEARL3,
+  THR_NEWFROMNEARB,
+#endif  // CONFIG_EXT_REFS
+  THR_NEWFROMNEARA,
+  THR_NEWFROMNEARG,
+#endif  // CONFIG_EXT_INTER
+
+  THR_ZEROMV,
+#if CONFIG_EXT_REFS
+  THR_ZEROL2,
+  THR_ZEROL3,
+  THR_ZEROB,
+#endif  // CONFIG_EXT_REFS
+  THR_ZEROG,
+  THR_ZEROA,
+
+#if CONFIG_EXT_INTER
+
+  THR_COMP_NEAREST_NEARESTLA,
+#if CONFIG_EXT_REFS
+  THR_COMP_NEAREST_NEARESTL2A,
+  THR_COMP_NEAREST_NEARESTL3A,
+#endif  // CONFIG_EXT_REFS
+  THR_COMP_NEAREST_NEARESTGA,
+#if CONFIG_EXT_REFS
+  THR_COMP_NEAREST_NEARESTLB,
+  THR_COMP_NEAREST_NEARESTL2B,
+  THR_COMP_NEAREST_NEARESTL3B,
+  THR_COMP_NEAREST_NEARESTGB,
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  THR_COMP_NEARESTLA,
+#if CONFIG_EXT_REFS
+  THR_COMP_NEARESTL2A,
+  THR_COMP_NEARESTL3A,
+#endif  // CONFIG_EXT_REFS
+  THR_COMP_NEARESTGA,
+#if CONFIG_EXT_REFS
+  THR_COMP_NEARESTLB,
+  THR_COMP_NEARESTL2B,
+  THR_COMP_NEARESTL3B,
+  THR_COMP_NEARESTGB,
+#endif  // CONFIG_EXT_REFS
+
+#endif  // CONFIG_EXT_INTER
+
+  THR_TM,
+
+#if CONFIG_EXT_INTER
+
+  THR_COMP_NEAR_NEARESTLA,
+  THR_COMP_NEAREST_NEARLA,
+  THR_COMP_NEAR_NEARLA,
+  THR_COMP_NEW_NEARESTLA,
+  THR_COMP_NEAREST_NEWLA,
+  THR_COMP_NEW_NEARLA,
+  THR_COMP_NEAR_NEWLA,
+  THR_COMP_NEW_NEWLA,
+  THR_COMP_ZERO_ZEROLA,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_NEAR_NEARESTL2A,
+  THR_COMP_NEAREST_NEARL2A,
+  THR_COMP_NEAR_NEARL2A,
+  THR_COMP_NEW_NEARESTL2A,
+  THR_COMP_NEAREST_NEWL2A,
+  THR_COMP_NEW_NEARL2A,
+  THR_COMP_NEAR_NEWL2A,
+  THR_COMP_NEW_NEWL2A,
+  THR_COMP_ZERO_ZEROL2A,
+
+  THR_COMP_NEAR_NEARESTL3A,
+  THR_COMP_NEAREST_NEARL3A,
+  THR_COMP_NEAR_NEARL3A,
+  THR_COMP_NEW_NEARESTL3A,
+  THR_COMP_NEAREST_NEWL3A,
+  THR_COMP_NEW_NEARL3A,
+  THR_COMP_NEAR_NEWL3A,
+  THR_COMP_NEW_NEWL3A,
+  THR_COMP_ZERO_ZEROL3A,
+#endif  // CONFIG_EXT_REFS
+
+  THR_COMP_NEAR_NEARESTGA,
+  THR_COMP_NEAREST_NEARGA,
+  THR_COMP_NEAR_NEARGA,
+  THR_COMP_NEW_NEARESTGA,
+  THR_COMP_NEAREST_NEWGA,
+  THR_COMP_NEW_NEARGA,
+  THR_COMP_NEAR_NEWGA,
+  THR_COMP_NEW_NEWGA,
+  THR_COMP_ZERO_ZEROGA,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_NEAR_NEARESTLB,
+  THR_COMP_NEAREST_NEARLB,
+  THR_COMP_NEAR_NEARLB,
+  THR_COMP_NEW_NEARESTLB,
+  THR_COMP_NEAREST_NEWLB,
+  THR_COMP_NEW_NEARLB,
+  THR_COMP_NEAR_NEWLB,
+  THR_COMP_NEW_NEWLB,
+  THR_COMP_ZERO_ZEROLB,
+
+  THR_COMP_NEAR_NEARESTL2B,
+  THR_COMP_NEAREST_NEARL2B,
+  THR_COMP_NEAR_NEARL2B,
+  THR_COMP_NEW_NEARESTL2B,
+  THR_COMP_NEAREST_NEWL2B,
+  THR_COMP_NEW_NEARL2B,
+  THR_COMP_NEAR_NEWL2B,
+  THR_COMP_NEW_NEWL2B,
+  THR_COMP_ZERO_ZEROL2B,
+
+  THR_COMP_NEAR_NEARESTL3B,
+  THR_COMP_NEAREST_NEARL3B,
+  THR_COMP_NEAR_NEARL3B,
+  THR_COMP_NEW_NEARESTL3B,
+  THR_COMP_NEAREST_NEWL3B,
+  THR_COMP_NEW_NEARL3B,
+  THR_COMP_NEAR_NEWL3B,
+  THR_COMP_NEW_NEWL3B,
+  THR_COMP_ZERO_ZEROL3B,
+
+  THR_COMP_NEAR_NEARESTGB,
+  THR_COMP_NEAREST_NEARGB,
+  THR_COMP_NEAR_NEARGB,
+  THR_COMP_NEW_NEARESTGB,
+  THR_COMP_NEAREST_NEWGB,
+  THR_COMP_NEW_NEARGB,
+  THR_COMP_NEAR_NEWGB,
+  THR_COMP_NEW_NEWGB,
+  THR_COMP_ZERO_ZEROGB,
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  THR_COMP_NEARLA,
+  THR_COMP_NEWLA,
+#if CONFIG_EXT_REFS
+  THR_COMP_NEARL2A,
+  THR_COMP_NEWL2A,
+  THR_COMP_NEARL3A,
+  THR_COMP_NEWL3A,
+#endif  // CONFIG_EXT_REFS
+  THR_COMP_NEARGA,
+  THR_COMP_NEWGA,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_NEARLB,
+  THR_COMP_NEWLB,
+  THR_COMP_NEARL2B,
+  THR_COMP_NEWL2B,
+  THR_COMP_NEARL3B,
+  THR_COMP_NEWL3B,
+  THR_COMP_NEARGB,
+  THR_COMP_NEWGB,
+#endif  // CONFIG_EXT_REFS
+
+  THR_COMP_ZEROLA,
+#if CONFIG_EXT_REFS
+  THR_COMP_ZEROL2A,
+  THR_COMP_ZEROL3A,
+#endif  // CONFIG_EXT_REFS
+  THR_COMP_ZEROGA,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_ZEROLB,
+  THR_COMP_ZEROL2B,
+  THR_COMP_ZEROL3B,
+  THR_COMP_ZEROGB,
+#endif  // CONFIG_EXT_REFS
+
+#endif  // CONFIG_EXT_INTER
+
+  THR_H_PRED,
+  THR_V_PRED,
+  THR_D135_PRED,
+  THR_D207_PRED,
+  THR_D153_PRED,
+  THR_D63_PRED,
+  THR_D117_PRED,
+  THR_D45_PRED,
+
+#if CONFIG_EXT_INTER
+  THR_COMP_INTERINTRA_ZEROL,
+  THR_COMP_INTERINTRA_NEARESTL,
+  THR_COMP_INTERINTRA_NEARL,
+  THR_COMP_INTERINTRA_NEWL,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_INTERINTRA_ZEROL2,
+  THR_COMP_INTERINTRA_NEARESTL2,
+  THR_COMP_INTERINTRA_NEARL2,
+  THR_COMP_INTERINTRA_NEWL2,
+
+  THR_COMP_INTERINTRA_ZEROL3,
+  THR_COMP_INTERINTRA_NEARESTL3,
+  THR_COMP_INTERINTRA_NEARL3,
+  THR_COMP_INTERINTRA_NEWL3,
+#endif  // CONFIG_EXT_REFS
+
+  THR_COMP_INTERINTRA_ZEROG,
+  THR_COMP_INTERINTRA_NEARESTG,
+  THR_COMP_INTERINTRA_NEARG,
+  THR_COMP_INTERINTRA_NEWG,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_INTERINTRA_ZEROB,
+  THR_COMP_INTERINTRA_NEARESTB,
+  THR_COMP_INTERINTRA_NEARB,
+  THR_COMP_INTERINTRA_NEWB,
+#endif  // CONFIG_EXT_REFS
+
+  THR_COMP_INTERINTRA_ZEROA,
+  THR_COMP_INTERINTRA_NEARESTA,
+  THR_COMP_INTERINTRA_NEARA,
+  THR_COMP_INTERINTRA_NEWA,
+#endif  // CONFIG_EXT_INTER
+} THR_MODES;
+
+typedef enum {
+  THR_LAST,
+#if CONFIG_EXT_REFS
+  THR_LAST2,
+  THR_LAST3,
+  THR_BWDR,
+#endif  // CONFIG_EXT_REFS
+  THR_GOLD,
+  THR_ALTR,
+
+  THR_COMP_LA,
+#if CONFIG_EXT_REFS
+  THR_COMP_L2A,
+  THR_COMP_L3A,
+#endif  // CONFIG_EXT_REFS
+  THR_COMP_GA,
+
+#if CONFIG_EXT_REFS
+  THR_COMP_LB,
+  THR_COMP_L2B,
+  THR_COMP_L3B,
+  THR_COMP_GB,
+#endif  // CONFIG_EXT_REFS
+
+  THR_INTRA,
+} THR_MODES_SUB8X8;
+
+typedef struct RD_OPT {
+  // Thresh_mult is used to set a threshold for the rd score. A higher value
+  // means that we will accept the best mode so far more often. This number
+  // is used in combination with the current block size, and thresh_freq_fact
+  // to pick a threshold.
+  int thresh_mult[MAX_MODES];
+  int thresh_mult_sub8x8[MAX_REFS];
+
+  int threshes[MAX_SEGMENTS][BLOCK_SIZES][MAX_MODES];
+
+  int64_t prediction_type_threshes[TOTAL_REFS_PER_FRAME][REFERENCE_MODES];
+
+  int RDMULT;
+  int RDDIV;
+} RD_OPT;
+
+typedef struct RD_COST {
+  int rate;
+  int64_t dist;
+  int64_t rdcost;
+} RD_COST;
+
+// Reset the rate distortion cost values to maximum (invalid) value.
+void vp10_rd_cost_reset(RD_COST *rd_cost);
+// Initialize the rate distortion cost values to zero.
+void vp10_rd_cost_init(RD_COST *rd_cost);
+
+struct TileInfo;
+struct TileDataEnc;
+struct VP10_COMP;
+struct macroblock;
+
+int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
+
+void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
+
+void vp10_initialize_me_consts(const struct VP10_COMP *cpi, MACROBLOCK *x,
+                               int qindex);
+
+void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n,
+                                   unsigned int qstep, int *rate,
+                                   int64_t *dist);
+
+int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
+                             const MACROBLOCKD *const xd);
+
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+                             int stride);
+
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+                                        int raster_block, int16_t *base);
+
+YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
+                                              int ref_frame);
+
+void vp10_init_me_luts(void);
+
+#if CONFIG_REF_MV
+void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
+#endif
+
+void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+                               const struct macroblockd_plane *pd,
+                               ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE],
+                               ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE]);
+
+void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
+
+void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
+
+void vp10_update_rd_thresh_fact(const VP10_COMMON *const cm,
+                                int (*fact)[MAX_MODES], int rd_thresh,
+                                int bsize, int best_mode_index);
+
+void vp10_fill_token_costs(vp10_coeff_cost *c,
+#if CONFIG_ANS
+                           coeff_cdf_model (*cdf)[PLANE_TYPES],
+#endif  // CONFIG_ANS
+                           vp10_coeff_probs_model (*p)[PLANE_TYPES]);
+
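+// Helper for mode pruning: callers typically skip a mode when the best RD
+// cost found so far is already below thresh * thresh_fact / 32, or when
+// thresh is INT_MAX (a mode that is never searched). thresh_fact is the
+// adaptive per-mode factor maintained by vp10_update_rd_thresh_fact().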
+static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
+                                      int thresh_fact) {
+  return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
+}
+
+void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+                  int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
+
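+// errorperbit is the error-per-bit trade-off used when weighing
+// motion-vector rate against prediction error; it is rdmult / 2^RD_EPB_SHIFT
+// (i.e. rdmult / 64), clamped to at least 1 so it never degenerates to zero.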
+static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
+  x->errorperbit = rdmult >> RD_EPB_SHIFT;
+  x->errorperbit += (x->errorperbit == 0);
+}
+
+void vp10_setup_pred_block(const MACROBLOCKD *xd,
+                           struct buf_2d dst[MAX_MB_PLANE],
+                           const YV12_BUFFER_CONFIG *src, int mi_row,
+                           int mi_col, const struct scale_factors *scale,
+                           const struct scale_factors *scale_uv);
+
+int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+                                vpx_bit_depth_t bit_depth);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_RD_H_
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
new file mode 100644
index 0000000..62334a3
--- /dev/null
+++ b/av1/encoder/rdopt.c
@@ -0,0 +1,10808 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include "./vp10_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/blend.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/system_state.h"
+
+#include "av1/common/common.h"
+#include "av1/common/common_data.h"
+#include "av1/common/entropy.h"
+#include "av1/common/entropymode.h"
+#include "av1/common/idct.h"
+#include "av1/common/mvref_common.h"
+#include "av1/common/pred_common.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/common/scan.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/cost.h"
+#include "av1/encoder/encodemb.h"
+#include "av1/encoder/encodemv.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/hybrid_fwd_txfm.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/palette.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/rd.h"
+#include "av1/encoder/rdopt.h"
+#include "av1/encoder/aq_variance.h"
+
+#if CONFIG_DUAL_FILTER
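+// With dual filters the encoder searches interpolation-filter pairs rather
+// than a single filter; filter_sets enumerates the full Cartesian product of
+// the available filters (5x5 pairs with EXT_INTERP, 3x3 otherwise).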
+#if CONFIG_EXT_INTERP
+static const int filter_sets[25][2] = {
+  { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 }, { 1, 0 }, { 1, 1 },
+  { 1, 2 }, { 1, 3 }, { 1, 4 }, { 2, 0 }, { 2, 1 }, { 2, 2 }, { 2, 3 },
+  { 2, 4 }, { 3, 0 }, { 3, 1 }, { 3, 2 }, { 3, 3 }, { 3, 4 }, { 4, 0 },
+  { 4, 1 }, { 4, 2 }, { 4, 3 }, { 4, 4 },
+};
+#else
+static const int filter_sets[9][2] = {
+  { 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 0 }, { 1, 1 },
+  { 1, 2 }, { 2, 0 }, { 2, 1 }, { 2, 2 },
+};
+#endif
+#endif
+
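+// Each *_MODE_MASK below has a bit set for every reference frame except the
+// one it is named after; the rd mode loop uses these to mask out candidate
+// modes when a given reference is excluded from the search.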
+#if CONFIG_EXT_REFS
+
+#define LAST_FRAME_MODE_MASK                                      \
+  ((1 << INTRA_FRAME) | (1 << LAST2_FRAME) | (1 << LAST3_FRAME) | \
+   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME))
+#define LAST2_FRAME_MODE_MASK                                    \
+  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST3_FRAME) | \
+   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME))
+#define LAST3_FRAME_MODE_MASK                                    \
+  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) | \
+   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME))
+#define GOLDEN_FRAME_MODE_MASK                                   \
+  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) | \
+   (1 << LAST3_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME))
+#define BWDREF_FRAME_MODE_MASK                                   \
+  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) | \
+   (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME))
+#define ALTREF_FRAME_MODE_MASK                                   \
+  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) | \
+   (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME))
+
+#else
+
+#define LAST_FRAME_MODE_MASK \
+  ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define GOLDEN_FRAME_MODE_MASK \
+  ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define ALTREF_FRAME_MODE_MASK \
+  ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
+
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | (1 << BWDREF_FRAME) | 0x01)
+#else
+#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
+#endif  // CONFIG_EXT_REFS
+
+#define MIN_EARLY_TERM_INDEX 3
+#define NEW_MV_DISCOUNT_FACTOR 8
+
+#if CONFIG_EXT_INTRA
+#define ANGLE_FAST_SEARCH 1
+#define ANGLE_SKIP_THRESH 10
+#define FILTER_FAST_SEARCH 1
+#endif  // CONFIG_EXT_INTRA
+
+const double ADST_FLIP_SVM[8] = { -6.6623, -2.8062, -3.2531, 3.1671,    // vert
+                                  -7.7051, -3.2234, -3.6193, 3.4533 };  // horz
+
+typedef struct {
+  PREDICTION_MODE mode;
+  MV_REFERENCE_FRAME ref_frame[2];
+} MODE_DEFINITION;
+
+typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
+
+struct rdcost_block_args {
+  const VP10_COMP *cpi;
+  MACROBLOCK *x;
+  ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE];
+  ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE];
+  int this_rate;
+  int64_t this_dist;
+  int64_t this_sse;
+  int64_t this_rd;
+  int64_t best_rd;
+  int exit_early;
+  int use_fast_coef_costing;
+  const scan_order *so;
+  uint8_t skippable;
+};
+
+#define LAST_NEW_MV_INDEX 6
+static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
+  { NEARESTMV, { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { NEARESTMV, { LAST2_FRAME, NONE } },
+  { NEARESTMV, { LAST3_FRAME, NONE } },
+  { NEARESTMV, { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { NEARESTMV, { ALTREF_FRAME, NONE } },
+  { NEARESTMV, { GOLDEN_FRAME, NONE } },
+
+  { DC_PRED, { INTRA_FRAME, NONE } },
+
+  { NEWMV, { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { NEWMV, { LAST2_FRAME, NONE } },
+  { NEWMV, { LAST3_FRAME, NONE } },
+  { NEWMV, { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { NEWMV, { ALTREF_FRAME, NONE } },
+  { NEWMV, { GOLDEN_FRAME, NONE } },
+
+  { NEARMV, { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { NEARMV, { LAST2_FRAME, NONE } },
+  { NEARMV, { LAST3_FRAME, NONE } },
+  { NEARMV, { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { NEARMV, { ALTREF_FRAME, NONE } },
+  { NEARMV, { GOLDEN_FRAME, NONE } },
+
+#if CONFIG_EXT_INTER
+  { NEWFROMNEARMV, { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { NEWFROMNEARMV, { LAST2_FRAME, NONE } },
+  { NEWFROMNEARMV, { LAST3_FRAME, NONE } },
+  { NEWFROMNEARMV, { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { NEWFROMNEARMV, { ALTREF_FRAME, NONE } },
+  { NEWFROMNEARMV, { GOLDEN_FRAME, NONE } },
+#endif  // CONFIG_EXT_INTER
+
+  { ZEROMV, { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { ZEROMV, { LAST2_FRAME, NONE } },
+  { ZEROMV, { LAST3_FRAME, NONE } },
+  { ZEROMV, { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { ZEROMV, { GOLDEN_FRAME, NONE } },
+  { ZEROMV, { ALTREF_FRAME, NONE } },
+
+// TODO(zoeliu): May need to reconsider the order of the modes to check.
+
+#if CONFIG_EXT_INTER
+  { NEAREST_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { NEAREST_NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+  { NEAREST_NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { NEAREST_NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+  { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+#endif  // CONFIG_EXT_INTER
+
+  { TM_PRED, { INTRA_FRAME, NONE } },
+
+#if CONFIG_EXT_INTER
+  { NEAR_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEAR_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEW_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEW_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEAR_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEW_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+  { ZERO_ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { NEAR_NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEAR_NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEW_NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEW_NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEAR_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEW_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { ZERO_ZEROMV, { LAST2_FRAME, ALTREF_FRAME } },
+
+  { NEAR_NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEAR_NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEW_NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEW_NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEAR_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEW_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { ZERO_ZEROMV, { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+  { NEAR_NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEAR_NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEW_NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEAREST_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEW_NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEAR_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEW_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { ZERO_ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { NEAR_NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEAR_NEARMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEW_NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEW_NEARMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEAR_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEW_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
+  { ZERO_ZEROMV, { LAST_FRAME, BWDREF_FRAME } },
+
+  { NEAR_NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEAR_NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEW_NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEW_NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEAR_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEW_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { ZERO_ZEROMV, { LAST2_FRAME, BWDREF_FRAME } },
+
+  { NEAR_NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEAR_NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEW_NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEW_NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEAR_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEW_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { ZERO_ZEROMV, { LAST3_FRAME, BWDREF_FRAME } },
+
+  { NEAR_NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEAR_NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEW_NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEAREST_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEW_NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEAR_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEW_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { ZERO_ZEROMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+#else  // CONFIG_EXT_INTER
+
+  { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+  { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
+  { NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+  { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+  { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { NEARMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEWMV, { LAST_FRAME, BWDREF_FRAME } },
+  { NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+  { NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+  { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { ZEROMV, { LAST2_FRAME, ALTREF_FRAME } },
+  { ZEROMV, { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+  { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { ZEROMV, { LAST_FRAME, BWDREF_FRAME } },
+  { ZEROMV, { LAST2_FRAME, BWDREF_FRAME } },
+  { ZEROMV, { LAST3_FRAME, BWDREF_FRAME } },
+  { ZEROMV, { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+#endif  // CONFIG_EXT_INTER
+
+  { H_PRED, { INTRA_FRAME, NONE } },
+  { V_PRED, { INTRA_FRAME, NONE } },
+  { D135_PRED, { INTRA_FRAME, NONE } },
+  { D207_PRED, { INTRA_FRAME, NONE } },
+  { D153_PRED, { INTRA_FRAME, NONE } },
+  { D63_PRED, { INTRA_FRAME, NONE } },
+  { D117_PRED, { INTRA_FRAME, NONE } },
+  { D45_PRED, { INTRA_FRAME, NONE } },
+
+#if CONFIG_EXT_INTER
+  { ZEROMV, { LAST_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { LAST_FRAME, INTRA_FRAME } },
+  { NEARMV, { LAST_FRAME, INTRA_FRAME } },
+  { NEWMV, { LAST_FRAME, INTRA_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { ZEROMV, { LAST2_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { LAST2_FRAME, INTRA_FRAME } },
+  { NEARMV, { LAST2_FRAME, INTRA_FRAME } },
+  { NEWMV, { LAST2_FRAME, INTRA_FRAME } },
+
+  { ZEROMV, { LAST3_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { LAST3_FRAME, INTRA_FRAME } },
+  { NEARMV, { LAST3_FRAME, INTRA_FRAME } },
+  { NEWMV, { LAST3_FRAME, INTRA_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+  { ZEROMV, { GOLDEN_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { GOLDEN_FRAME, INTRA_FRAME } },
+  { NEARMV, { GOLDEN_FRAME, INTRA_FRAME } },
+  { NEWMV, { GOLDEN_FRAME, INTRA_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { ZEROMV, { BWDREF_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { BWDREF_FRAME, INTRA_FRAME } },
+  { NEARMV, { BWDREF_FRAME, INTRA_FRAME } },
+  { NEWMV, { BWDREF_FRAME, INTRA_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+  { ZEROMV, { ALTREF_FRAME, INTRA_FRAME } },
+  { NEARESTMV, { ALTREF_FRAME, INTRA_FRAME } },
+  { NEARMV, { ALTREF_FRAME, INTRA_FRAME } },
+  { NEWMV, { ALTREF_FRAME, INTRA_FRAME } },
+#endif  // CONFIG_EXT_INTER
+};
+
+static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
+  { { LAST_FRAME, NONE } },
+#if CONFIG_EXT_REFS
+  { { LAST2_FRAME, NONE } },          { { LAST3_FRAME, NONE } },
+  { { BWDREF_FRAME, NONE } },
+#endif  // CONFIG_EXT_REFS
+  { { GOLDEN_FRAME, NONE } },         { { ALTREF_FRAME, NONE } },
+
+  { { LAST_FRAME, ALTREF_FRAME } },
+#if CONFIG_EXT_REFS
+  { { LAST2_FRAME, ALTREF_FRAME } },  { { LAST3_FRAME, ALTREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+  { { GOLDEN_FRAME, ALTREF_FRAME } },
+
+#if CONFIG_EXT_REFS
+  { { LAST_FRAME, BWDREF_FRAME } },   { { LAST2_FRAME, BWDREF_FRAME } },
+  { { LAST3_FRAME, BWDREF_FRAME } },  { { GOLDEN_FRAME, BWDREF_FRAME } },
+#endif  // CONFIG_EXT_REFS
+
+  { { INTRA_FRAME, NONE } },
+};
+
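+// Cost of a truncated binary (near-uniform) code for a symbol v in [0, n):
+// with l = get_unsigned_bits(n) and m = 2^l - n, the first m symbols take
+// l - 1 bits and the rest take l bits, each bit costed at p = 0.5 via
+// vp10_cost_bit(128, 0). Illustrative example: n = 5 gives l = 3, m = 3, so
+// v < 3 costs 2 bits and v in {3, 4} costs 3 bits.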
+static INLINE int write_uniform_cost(int n, int v) {
+  int l = get_unsigned_bits(n), m = (1 << l) - n;
+  if (l == 0) return 0;
+  if (v < m)
+    return (l - 1) * vp10_cost_bit(128, 0);
+  else
+    return l * vp10_cost_bit(128, 0);
+}
+
+// Constants for the prune 1 and prune 2 decision boundaries.
+#define FAST_EXT_TX_CORR_MID 0.0
+#define FAST_EXT_TX_EDST_MID 0.1
+#define FAST_EXT_TX_CORR_MARGIN 0.5
+#define FAST_EXT_TX_EDST_MARGIN 0.3
+
+static const TX_TYPE_1D vtx_tab[TX_TYPES] = {
+  DCT_1D,      ADST_1D, DCT_1D,      ADST_1D,
+#if CONFIG_EXT_TX
+  FLIPADST_1D, DCT_1D,  FLIPADST_1D, ADST_1D, FLIPADST_1D, IDTX_1D,
+  DCT_1D,      IDTX_1D, ADST_1D,     IDTX_1D, FLIPADST_1D, IDTX_1D,
+#endif  // CONFIG_EXT_TX
+};
+
+static const TX_TYPE_1D htx_tab[TX_TYPES] = {
+  DCT_1D,  DCT_1D,      ADST_1D,     ADST_1D,
+#if CONFIG_EXT_TX
+  DCT_1D,  FLIPADST_1D, FLIPADST_1D, FLIPADST_1D, ADST_1D, IDTX_1D,
+  IDTX_1D, DCT_1D,      IDTX_1D,     ADST_1D,     IDTX_1D, FLIPADST_1D,
+#endif  // CONFIG_EXT_TX
+};
+
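+// Splits the residual block into a 4x4 grid of cells and measures how the
+// squared prediction error is distributed across it: hordist[0..2] hold the
+// energy fractions of the first three column bands and verdist[0..2] those
+// of the first three row bands (the fourth band is implied by the total).
+// These distributions feed the ADST-vs-FLIPADST pruning decision below.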
+static void get_energy_distribution_fine(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+                                         uint8_t *src, int src_stride,
+                                         uint8_t *dst, int dst_stride,
+                                         double *hordist, double *verdist) {
+  int bw = 4 << (b_width_log2_lookup[bsize]);
+  int bh = 4 << (b_height_log2_lookup[bsize]);
+  unsigned int esq[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+  unsigned int var[16];
+  double total = 0;
+
+  const int f_index = bsize - BLOCK_16X16;
+  if (f_index < 0) {
+    int i, j, index;
+    int w_shift = bw == 8 ? 1 : 2;
+    int h_shift = bh == 8 ? 1 : 2;
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (cpi->common.use_highbitdepth) {
+      uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+      uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
+      for (i = 0; i < bh; ++i)
+        for (j = 0; j < bw; ++j) {
+          index = (j >> w_shift) + ((i >> h_shift) << 2);
+          esq[index] +=
+              (src16[j + i * src_stride] - dst16[j + i * dst_stride]) *
+              (src16[j + i * src_stride] - dst16[j + i * dst_stride]);
+        }
+    } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+      for (i = 0; i < bh; ++i)
+        for (j = 0; j < bw; ++j) {
+          index = (j >> w_shift) + ((i >> h_shift) << 2);
+          esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
+                        (src[j + i * src_stride] - dst[j + i * dst_stride]);
+        }
+#if CONFIG_VP9_HIGHBITDEPTH
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  } else {
+    var[0] = cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[0]);
+    var[1] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
+                                     dst_stride, &esq[1]);
+    var[2] = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2,
+                                     dst_stride, &esq[2]);
+    var[3] = cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride,
+                                     dst + 3 * bw / 4, dst_stride, &esq[3]);
+    src += bh / 4 * src_stride;
+    dst += bh / 4 * dst_stride;
+
+    var[4] = cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[4]);
+    var[5] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
+                                     dst_stride, &esq[5]);
+    var[6] = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2,
+                                     dst_stride, &esq[6]);
+    var[7] = cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride,
+                                     dst + 3 * bw / 4, dst_stride, &esq[7]);
+    src += bh / 4 * src_stride;
+    dst += bh / 4 * dst_stride;
+
+    var[8] = cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[8]);
+    var[9] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
+                                     dst_stride, &esq[9]);
+    var[10] = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2,
+                                      dst_stride, &esq[10]);
+    var[11] = cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride,
+                                      dst + 3 * bw / 4, dst_stride, &esq[11]);
+    src += bh / 4 * src_stride;
+    dst += bh / 4 * dst_stride;
+
+    var[12] =
+        cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[12]);
+    var[13] = cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4,
+                                      dst_stride, &esq[13]);
+    var[14] = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2,
+                                      dst_stride, &esq[14]);
+    var[15] = cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride,
+                                      dst + 3 * bw / 4, dst_stride, &esq[15]);
+  }
+
+  total = esq[0] + esq[1] + esq[2] + esq[3] + esq[4] + esq[5] + esq[6] +
+          esq[7] + esq[8] + esq[9] + esq[10] + esq[11] + esq[12] + esq[13] +
+          esq[14] + esq[15];
+  if (total > 0) {
+    const double e_recip = 1.0 / total;
+    hordist[0] =
+        ((double)esq[0] + (double)esq[4] + (double)esq[8] + (double)esq[12]) *
+        e_recip;
+    hordist[1] =
+        ((double)esq[1] + (double)esq[5] + (double)esq[9] + (double)esq[13]) *
+        e_recip;
+    hordist[2] =
+        ((double)esq[2] + (double)esq[6] + (double)esq[10] + (double)esq[14]) *
+        e_recip;
+    verdist[0] =
+        ((double)esq[0] + (double)esq[1] + (double)esq[2] + (double)esq[3]) *
+        e_recip;
+    verdist[1] =
+        ((double)esq[4] + (double)esq[5] + (double)esq[6] + (double)esq[7]) *
+        e_recip;
+    verdist[2] =
+        ((double)esq[8] + (double)esq[9] + (double)esq[10] + (double)esq[11]) *
+        e_recip;
+  } else {
+    hordist[0] = verdist[0] = 0.25;
+    hordist[1] = verdist[1] = 0.25;
+    hordist[2] = verdist[2] = 0.25;
+  }
+  // The per-cell variances are not used here; silence set-but-unused
+  // warnings.
+  (void)var;
+}
+
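+// Projects the energy distributions onto the trained ADST_FLIP_SVM
+// hyperplanes; a projection far enough above or below FAST_EXT_TX_EDST_MID
+// (by more than FAST_EXT_TX_EDST_MARGIN) prunes FLIPADST or ADST
+// respectively, in the vertical (low byte) and horizontal (high byte)
+// halves of the returned bitmask.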
+static int adst_vs_flipadst(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+                            uint8_t *src, int src_stride, uint8_t *dst,
+                            int dst_stride, double *hdist, double *vdist) {
+  int prune_bitmask = 0;
+  double svm_proj_h = 0, svm_proj_v = 0;
+  get_energy_distribution_fine(cpi, bsize, src, src_stride, dst, dst_stride,
+                               hdist, vdist);
+
+  svm_proj_v = vdist[0] * ADST_FLIP_SVM[0] + vdist[1] * ADST_FLIP_SVM[1] +
+               vdist[2] * ADST_FLIP_SVM[2] + ADST_FLIP_SVM[3];
+  svm_proj_h = hdist[0] * ADST_FLIP_SVM[4] + hdist[1] * ADST_FLIP_SVM[5] +
+               hdist[2] * ADST_FLIP_SVM[6] + ADST_FLIP_SVM[7];
+  if (svm_proj_v > FAST_EXT_TX_EDST_MID + FAST_EXT_TX_EDST_MARGIN)
+    prune_bitmask |= 1 << FLIPADST_1D;
+  else if (svm_proj_v < FAST_EXT_TX_EDST_MID - FAST_EXT_TX_EDST_MARGIN)
+    prune_bitmask |= 1 << ADST_1D;
+
+  if (svm_proj_h > FAST_EXT_TX_EDST_MID + FAST_EXT_TX_EDST_MARGIN)
+    prune_bitmask |= 1 << (FLIPADST_1D + 8);
+  else if (svm_proj_h < FAST_EXT_TX_EDST_MID - FAST_EXT_TX_EDST_MARGIN)
+    prune_bitmask |= 1 << (ADST_1D + 8);
+
+  return prune_bitmask;
+}
+
+#if CONFIG_EXT_TX
+static void get_horver_correlation(int16_t *diff, int stride, int w, int h,
+                                   double *hcorr, double *vcorr) {
+  // Computes the horizontal/vertical correlation coefficients of the
+  // residual and returns them through *hcorr and *vcorr.
+  const int num = (h - 1) * (w - 1);
+  double num_r;
+  int i, j;
+  int64_t xy_sum = 0, xz_sum = 0;
+  int64_t x_sum = 0, y_sum = 0, z_sum = 0;
+  int64_t x2_sum = 0, y2_sum = 0, z2_sum = 0;
+  double x_var_n, y_var_n, z_var_n, xy_var_n, xz_var_n;
+  *hcorr = *vcorr = 1;
+
+  assert(num > 0);
+  num_r = 1.0 / num;
+  for (i = 1; i < h; ++i) {
+    for (j = 1; j < w; ++j) {
+      const int16_t x = diff[i * stride + j];
+      const int16_t y = diff[i * stride + j - 1];
+      const int16_t z = diff[(i - 1) * stride + j];
+      xy_sum += x * y;
+      xz_sum += x * z;
+      x_sum += x;
+      y_sum += y;
+      z_sum += z;
+      x2_sum += x * x;
+      y2_sum += y * y;
+      z2_sum += z * z;
+    }
+  }
+  x_var_n = x2_sum - (x_sum * x_sum) * num_r;
+  y_var_n = y2_sum - (y_sum * y_sum) * num_r;
+  z_var_n = z2_sum - (z_sum * z_sum) * num_r;
+  xy_var_n = xy_sum - (x_sum * y_sum) * num_r;
+  xz_var_n = xz_sum - (x_sum * z_sum) * num_r;
+  if (x_var_n > 0 && y_var_n > 0) {
+    *hcorr = xy_var_n / sqrt(x_var_n * y_var_n);
+    *hcorr = *hcorr < 0 ? 0 : *hcorr;
+  }
+  if (x_var_n > 0 && z_var_n > 0) {
+    *vcorr = xz_var_n / sqrt(x_var_n * z_var_n);
+    *vcorr = *vcorr < 0 ? 0 : *vcorr;
+  }
+}
+
+int dct_vs_idtx(int16_t *diff, int stride, int w, int h, double *hcorr,
+                double *vcorr) {
+  int prune_bitmask = 0;
+  get_horver_correlation(diff, stride, w, h, hcorr, vcorr);
+
+  if (*vcorr > FAST_EXT_TX_CORR_MID + FAST_EXT_TX_CORR_MARGIN)
+    prune_bitmask |= 1 << IDTX_1D;
+  else if (*vcorr < FAST_EXT_TX_CORR_MID - FAST_EXT_TX_CORR_MARGIN)
+    prune_bitmask |= 1 << DCT_1D;
+
+  if (*hcorr > FAST_EXT_TX_CORR_MID + FAST_EXT_TX_CORR_MARGIN)
+    prune_bitmask |= 1 << (IDTX_1D + 8);
+  else if (*hcorr < FAST_EXT_TX_CORR_MID - FAST_EXT_TX_CORR_MARGIN)
+    prune_bitmask |= 1 << (DCT_1D + 8);
+  return prune_bitmask;
+}
+
+// Performance drop: 0.5%, Speed improvement: 24%
+static int prune_two_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+                             MACROBLOCK *x, MACROBLOCKD *xd, int adst_flipadst,
+                             int dct_idtx) {
+  struct macroblock_plane *const p = &x->plane[0];
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+  const int bw = 4 << (b_width_log2_lookup[bs]);
+  const int bh = 4 << (b_height_log2_lookup[bs]);
+  double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
+  double hcorr, vcorr;
+  int prune = 0;
+  vp10_subtract_plane(x, bsize, 0);
+
+  if (adst_flipadst)
+    prune |= adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride,
+                              pd->dst.buf, pd->dst.stride, hdist, vdist);
+  if (dct_idtx) prune |= dct_vs_idtx(p->src_diff, bw, bw, bh, &hcorr, &vcorr);
+
+  return prune;
+}
+#endif  // CONFIG_EXT_TX
+
+// Performance drop: 0.3%, Speed improvement: 5%
+static int prune_one_for_sby(const VP10_COMP *cpi, BLOCK_SIZE bsize,
+                             MACROBLOCK *x, MACROBLOCKD *xd) {
+  struct macroblock_plane *const p = &x->plane[0];
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
+  vp10_subtract_plane(x, bsize, 0);
+  return adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride, pd->dst.buf,
+                          pd->dst.stride, hdist, vdist);
+}
+
+static int prune_tx_types(const VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+                          MACROBLOCKD *xd, int tx_set) {
+#if CONFIG_EXT_TX
+  const int *tx_set_1D = ext_tx_used_inter_1D[tx_set];
+#else
+  const int tx_set_1D[TX_TYPES_1D] = { 0 };
+#endif
+
+  switch (cpi->sf.tx_type_search.prune_mode) {
+    case NO_PRUNE: return 0; break;
+    case PRUNE_ONE:
+      if ((tx_set >= 0) & !(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D]))
+        return 0;
+      return prune_one_for_sby(cpi, bsize, x, xd);
+      break;
+#if CONFIG_EXT_TX
+    case PRUNE_TWO:
+      if ((tx_set >= 0) & !(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D])) {
+        if (!(tx_set_1D[DCT_1D] & tx_set_1D[IDTX_1D])) return 0;
+        return prune_two_for_sby(cpi, bsize, x, xd, 0, 1);
+      }
+      if ((tx_set >= 0) & !(tx_set_1D[DCT_1D] & tx_set_1D[IDTX_1D]))
+        return prune_two_for_sby(cpi, bsize, x, xd, 1, 0);
+      return prune_two_for_sby(cpi, bsize, x, xd, 1, 1);
+      break;
+#endif
+  }
+  assert(0);
+  return 0;
+}
+
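+// Prune bitmask convention used above: bit t in the low byte means "prune
+// 1-D vertical transform type t"; bit t + 8 means the same for the
+// horizontal direction. do_tx_type_search() maps a 2-D tx_type to its 1-D
+// components via vtx_tab/htx_tab and checks both bits.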
+static int do_tx_type_search(TX_TYPE tx_type, int prune) {
+// TODO(sarahparker) implement for non ext tx
+#if CONFIG_EXT_TX
+  return !(((prune >> vtx_tab[tx_type]) & 1) |
+           ((prune >> (htx_tab[tx_type] + 8)) & 1));
+#else
+  // temporary to avoid compiler warnings
+  (void)vtx_tab;
+  (void)htx_tab;
+  (void)tx_type;
+  (void)prune;
+  return 1;
+#endif
+}
+
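+// Models rate and distortion directly from the prediction SSE. In the fast
+// path the rate is a linear function of sse and the effective quantizer
+// (and zero once the quantizer reaches 120), and the distortion is
+// sse * quantizer / 256; otherwise vp10_model_rd_from_var_lapndz() is used.
+// The final <<= 4 puts the distortion on the same x16 scale used by the
+// pixel-domain SSE paths.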
+static void model_rd_from_sse(const VP10_COMP *const cpi,
+                              const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
+                              int plane, int64_t sse, int *rate,
+                              int64_t *dist) {
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int dequant_shift =
+#if CONFIG_VP9_HIGHBITDEPTH
+      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+                                                    3;
+
+  // Fast approximation of the modelling function.
+  if (cpi->sf.simple_model_rd_from_var) {
+    const int64_t square_error = sse;
+    int quantizer = (pd->dequant[1] >> dequant_shift);
+
+    if (quantizer < 120)
+      *rate = (int)((square_error * (280 - quantizer)) >>
+                    (16 - VP10_PROB_COST_SHIFT));
+    else
+      *rate = 0;
+    *dist = (square_error * quantizer) >> 8;
+  } else {
+    vp10_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
+                                  pd->dequant[1] >> dequant_shift, rate, dist);
+  }
+
+  *dist <<= 4;
+}
+
+static void model_rd_for_sb(const VP10_COMP *const cpi, BLOCK_SIZE bsize,
+                            MACROBLOCK *x, MACROBLOCKD *xd, int plane_from,
+                            int plane_to, int *out_rate_sum,
+                            int64_t *out_dist_sum, int *skip_txfm_sb,
+                            int64_t *skip_sse_sb) {
+  // Note our transform coeffs are 8 times the output of an orthogonal
+  // transform, so the quantizer step is scaled by 8 as well. To get the
+  // effective quantizer we need to divide by 8 before sending it to the
+  // modeling function.
+  int plane;
+  const int ref = xd->mi[0]->mbmi.ref_frame[0];
+
+  int64_t rate_sum = 0;
+  int64_t dist_sum = 0;
+  int64_t total_sse = 0;
+
+  x->pred_sse[ref] = 0;
+
+  for (plane = plane_from; plane <= plane_to; ++plane) {
+    struct macroblock_plane *const p = &x->plane[plane];
+    struct macroblockd_plane *const pd = &xd->plane[plane];
+    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
+
+    unsigned int sse;
+    int rate;
+    int64_t dist;
+
+    // TODO(geza): Write direct sse functions that do not compute
+    // variance as well.
+    cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride,
+                       &sse);
+
+    if (plane == 0) x->pred_sse[ref] = sse;
+
+    total_sse += sse;
+
+    model_rd_from_sse(cpi, xd, bs, plane, sse, &rate, &dist);
+
+    rate_sum += rate;
+    dist_sum += dist;
+  }
+
+  *skip_txfm_sb = total_sse == 0;
+  *skip_sse_sb = total_sse << 4;
+  *out_rate_sum = (int)rate_sum;
+  *out_dist_sum = dist_sum;
+}
+
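+// Reference (C) implementation of the coefficient-domain block error:
+// returns the sum of squared coeff/dqcoeff differences and stores the sum
+// of squared coefficients in *ssz, which callers can use when evaluating an
+// all-skip alternative.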
+int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+                           intptr_t block_size, int64_t *ssz) {
+  int i;
+  int64_t error = 0, sqcoeff = 0;
+
+  for (i = 0; i < block_size; i++) {
+    const int diff = coeff[i] - dqcoeff[i];
+    error += diff * diff;
+    sqcoeff += coeff[i] * coeff[i];
+  }
+
+  *ssz = sqcoeff;
+  return error;
+}
+
+int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+                              int block_size) {
+  int i;
+  int64_t error = 0;
+
+  for (i = 0; i < block_size; i++) {
+    const int diff = coeff[i] - dqcoeff[i];
+    error += diff * diff;
+  }
+
+  return error;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
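+// High bit depth variant: coefficients live at a scale up to 2^(bd - 8)
+// larger than at 8-bit, so the accumulated squared errors are normalized
+// back with a rounded right shift by 2 * (bd - 8) to keep costs comparable
+// across bit depths.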
+int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
+                                  const tran_low_t *dqcoeff,
+                                  intptr_t block_size, int64_t *ssz, int bd) {
+  int i;
+  int64_t error = 0, sqcoeff = 0;
+  int shift = 2 * (bd - 8);
+  int rounding = shift > 0 ? 1 << (shift - 1) : 0;
+
+  for (i = 0; i < block_size; i++) {
+    const int64_t diff = coeff[i] - dqcoeff[i];
+    error += diff * diff;
+    sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
+  }
+  assert(error >= 0 && sqcoeff >= 0);
+  error = (error + rounding) >> shift;
+  sqcoeff = (sqcoeff + rounding) >> shift;
+
+  *ssz = sqcoeff;
+  return error;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+/* The trailing '0' is a terminator which is used inside cost_coeffs() to
+ * decide whether to include the cost of a trailing EOB node or not (i.e. we
+ * can skip this if the last coefficient in this transform block, e.g. the
+ * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
+ * is non-zero). */
+static int cost_coeffs(MACROBLOCK *x, int plane, int block,
+#if CONFIG_VAR_TX
+                       int coeff_ctx,
+#else
+                       ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
+#endif
+                       TX_SIZE tx_size, const int16_t *scan, const int16_t *nb,
+                       int use_fast_coef_costing) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  const struct macroblock_plane *p = &x->plane[plane];
+  const struct macroblockd_plane *pd = &xd->plane[plane];
+  const PLANE_TYPE type = pd->plane_type;
+  const uint16_t *band_count = &band_count_table[tx_size][1];
+  const int eob = p->eobs[block];
+  const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+  const int tx_size_ctx = txsize_sqr_map[tx_size];
+  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+      x->token_costs[tx_size_ctx][type][is_inter_block(mbmi)];
+  uint8_t token_cache[MAX_TX_SQUARE];
+#if CONFIG_VAR_TX
+  int pt = coeff_ctx;
+#else
+  int pt = combine_entropy_contexts(*A, *L);
+#endif
+  int c, cost;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#else
+  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+#endif
+
+#if !CONFIG_VAR_TX && !CONFIG_SUPERTX
+  // Check for consistency of tx_size with mode info
+  assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
+                              : get_uv_tx_size(mbmi, pd) == tx_size);
+#endif  // !CONFIG_VAR_TX && !CONFIG_SUPERTX
+
+  if (eob == 0) {
+    // single eob token
+    cost = token_costs[0][0][pt][EOB_TOKEN];
+    c = 0;
+  } else {
+    if (use_fast_coef_costing) {
+      int band_left = *band_count++;
+
+      // dc token
+      int v = qcoeff[0];
+      int16_t prev_t;
+      cost = vp10_get_token_cost(v, &prev_t, cat6_high_cost);
+      cost += (*token_costs)[0][pt][prev_t];
+
+      token_cache[0] = vp10_pt_energy_class[prev_t];
+      ++token_costs;
+
+      // ac tokens
+      for (c = 1; c < eob; c++) {
+        const int rc = scan[c];
+        int16_t t;
+
+        v = qcoeff[rc];
+        cost += vp10_get_token_cost(v, &t, cat6_high_cost);
+        cost += (*token_costs)[!prev_t][!prev_t][t];
+        prev_t = t;
+        if (!--band_left) {
+          band_left = *band_count++;
+          ++token_costs;
+        }
+      }
+
+      // eob token
+      if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
+
+    } else {  // !use_fast_coef_costing
+      int band_left = *band_count++;
+
+      // dc token
+      int v = qcoeff[0];
+      int16_t tok;
+      unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
+      cost = vp10_get_token_cost(v, &tok, cat6_high_cost);
+      cost += (*token_costs)[0][pt][tok];
+
+      token_cache[0] = vp10_pt_energy_class[tok];
+      ++token_costs;
+
+      tok_cost_ptr = &((*token_costs)[!tok]);
+
+      // ac tokens
+      for (c = 1; c < eob; c++) {
+        const int rc = scan[c];
+
+        v = qcoeff[rc];
+        cost += vp10_get_token_cost(v, &tok, cat6_high_cost);
+        pt = get_coef_context(nb, token_cache, c);
+        cost += (*tok_cost_ptr)[pt][tok];
+        token_cache[rc] = vp10_pt_energy_class[tok];
+        if (!--band_left) {
+          band_left = *band_count++;
+          ++token_costs;
+        }
+        tok_cost_ptr = &((*token_costs)[!tok]);
+      }
+
+      // eob token
+      if (band_left) {
+        pt = get_coef_context(nb, token_cache, c);
+        cost += (*token_costs)[0][pt][EOB_TOKEN];
+      }
+    }
+  }
+
+#if !CONFIG_VAR_TX
+  // Update the above/left entropy contexts: nonzero if the block has any
+  // coded coefficients (eob > 0).
+  *A = *L = (c > 0);
+#endif
+
+  return cost;
+}
+
+static void dist_block(const VP10_COMP *cpi, MACROBLOCK *x, int plane,
+                       int block, int blk_row, int blk_col, TX_SIZE tx_size,
+                       int64_t *out_dist, int64_t *out_sse) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  if (cpi->sf.use_transform_domain_distortion) {
+    // Transform domain distortion computation is more efficient as it does
+    // not involve an inverse transform, but it is less accurate.
+    const int ss_txfrm_size = num_4x4_blocks_txsize_log2_lookup[tx_size];
+    int64_t this_sse;
+    int tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
+    int shift = (MAX_TX_SCALE - get_tx_scale(xd, tx_type, tx_size)) * 2;
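+    // The shift undoes the forward transform's internal scaling so the
+    // error is comparable to a pixel-domain SSE.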
+    tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
+    tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+#if CONFIG_VP9_HIGHBITDEPTH
+    const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
+    *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+                                        &this_sse, bd) >>
+                shift;
+#else
+    *out_dist =
+        vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
+        shift;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    *out_sse = this_sse >> shift;
+  } else {
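+    // Pixel-domain path: reconstruct the block via the inverse transform
+    // (when eob > 0) and measure distortion against the source with the
+    // block-size variance function.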
+    const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
+    const int bsw = 4 * num_4x4_blocks_wide_lookup[tx_bsize];
+    const int bsh = 4 * num_4x4_blocks_high_lookup[tx_bsize];
+    const int src_stride = x->plane[plane].src.stride;
+    const int dst_stride = xd->plane[plane].dst.stride;
+    const int src_idx = 4 * (blk_row * src_stride + blk_col);
+    const int dst_idx = 4 * (blk_row * dst_stride + blk_col);
+    const uint8_t *src = &x->plane[plane].src.buf[src_idx];
+    const uint8_t *dst = &xd->plane[plane].dst.buf[dst_idx];
+    const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+    const uint16_t eob = p->eobs[block];
+
+    unsigned int tmp;
+
+    assert(cpi != NULL);
+
+    cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &tmp);
+    *out_sse = (int64_t)tmp * 16;
+
+    if (eob) {
+      const MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+#if CONFIG_VP9_HIGHBITDEPTH
+      DECLARE_ALIGNED(16, uint16_t, recon16[MAX_TX_SQUARE]);
+      uint8_t *recon = (uint8_t *)recon16;
+#else
+      DECLARE_ALIGNED(16, uint8_t, recon[MAX_TX_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+      const PLANE_TYPE plane_type = plane == 0 ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+
+      INV_TXFM_PARAM inv_txfm_param;
+
+      inv_txfm_param.tx_type = get_tx_type(plane_type, xd, block, tx_size);
+      inv_txfm_param.tx_size = tx_size;
+      inv_txfm_param.eob = eob;
+      inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        recon = CONVERT_TO_BYTEPTR(recon);
+        inv_txfm_param.bd = xd->bd;
+        vpx_highbd_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0,
+                                 NULL, 0, bsw, bsh, xd->bd);
+        highbd_inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
+      } else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      {
+        vpx_convolve_copy(dst, dst_stride, recon, MAX_TX_SIZE, NULL, 0, NULL, 0,
+                          bsw, bsh);
+        inv_txfm_add(dqcoeff, recon, MAX_TX_SIZE, &inv_txfm_param);
+      }
+
+      cpi->fn_ptr[tx_bsize].vf(src, src_stride, recon, MAX_TX_SIZE, &tmp);
+    }
+
+    *out_dist = (int64_t)tmp * 16;
+  }
+}
+
+static int rate_block(int plane, int block, int blk_row, int blk_col,
+                      TX_SIZE tx_size, struct rdcost_block_args *args) {
+#if CONFIG_VAR_TX
+  int coeff_ctx = combine_entropy_contexts(*(args->t_above + blk_col),
+                                           *(args->t_left + blk_row));
+  int coeff_cost =
+      cost_coeffs(args->x, plane, block, coeff_ctx, tx_size, args->so->scan,
+                  args->so->neighbors, args->use_fast_coef_costing);
+  const struct macroblock_plane *p = &args->x->plane[plane];
+  *(args->t_above + blk_col) = !(p->eobs[block] == 0);
+  *(args->t_left + blk_row) = !(p->eobs[block] == 0);
+  return coeff_cost;
+#else
+  return cost_coeffs(args->x, plane, block, args->t_above + blk_col,
+                     args->t_left + blk_row, tx_size, args->so->scan,
+                     args->so->neighbors, args->use_fast_coef_costing);
+#endif  // CONFIG_VAR_TX
+}
+
+static uint64_t sum_squares_2d(const int16_t *diff, int diff_stride,
+                               TX_SIZE tx_size) {
+  uint64_t sse;
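+  // vpx_sum_squares_2d_i16() works on square regions, so rectangular tx
+  // sizes are summed as two square halves.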
+  switch (tx_size) {
+#if CONFIG_EXT_TX
+    case TX_4X8:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
+            vpx_sum_squares_2d_i16(diff + 4 * diff_stride, diff_stride, 4);
+      break;
+    case TX_8X4:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 4) +
+            vpx_sum_squares_2d_i16(diff + 4, diff_stride, 4);
+      break;
+    case TX_8X16:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
+            vpx_sum_squares_2d_i16(diff + 8 * diff_stride, diff_stride, 8);
+      break;
+    case TX_16X8:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 8) +
+            vpx_sum_squares_2d_i16(diff + 8, diff_stride, 8);
+      break;
+    case TX_16X32:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
+            vpx_sum_squares_2d_i16(diff + 16 * diff_stride, diff_stride, 16);
+      break;
+    case TX_32X16:
+      sse = vpx_sum_squares_2d_i16(diff, diff_stride, 16) +
+            vpx_sum_squares_2d_i16(diff + 16, diff_stride, 16);
+      break;
+#endif  // CONFIG_EXT_TX
+    default:
+      assert(tx_size < TX_SIZES);
+      sse = vpx_sum_squares_2d_i16(
+          diff, diff_stride, num_4x4_blocks_wide_txsize_lookup[tx_size] << 2);
+      break;
+  }
+  return sse;
+}
+
+static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
+                          BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+  struct rdcost_block_args *args = arg;
+  MACROBLOCK *const x = args->x;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int64_t rd1, rd2, rd;
+  int rate;
+  int64_t dist;
+  int64_t sse;
+  int coeff_ctx = combine_entropy_contexts(*(args->t_above + blk_col),
+                                           *(args->t_left + blk_row));
+
+  if (args->exit_early) return;
+
+  if (!is_inter_block(mbmi)) {
+    struct encode_b_args intra_arg = {
+      x, NULL, &mbmi->skip, args->t_above, args->t_left, 1
+    };
+    vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, &intra_arg);
+
+    if (args->cpi->sf.use_transform_domain_distortion) {
+      dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
+                 &sse);
+    } else {
+      // Note that the encode block_intra call above already calls
+      // inv_txfm_add, so we can't just call dist_block here.
+      const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
+      const vpx_variance_fn_t variance = args->cpi->fn_ptr[tx_bsize].vf;
+
+      const struct macroblock_plane *const p = &x->plane[plane];
+      const struct macroblockd_plane *const pd = &xd->plane[plane];
+
+      const int src_stride = p->src.stride;
+      const int dst_stride = pd->dst.stride;
+      const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+
+      const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
+      const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
+      const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+      unsigned int tmp;
+      sse = sum_squares_2d(diff, diff_stride, tx_size);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+        sse = ROUND_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      sse = (int64_t)sse * 16;
+
+      variance(src, src_stride, dst, dst_stride, &tmp);
+      dist = (int64_t)tmp * 16;
+    }
+  } else {
+// full forward transform and quantization
+#if CONFIG_NEW_QUANT
+    vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, coeff_ctx);
+#else
+    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                     VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+    if (x->plane[plane].eobs[block])
+      vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+    dist_block(args->cpi, x, plane, block, blk_row, blk_col, tx_size, &dist,
+               &sse);
+  }
+
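+  // Quick check with distortion only (zero rate): if the running total plus
+  // this already exceeds the best cost, skip the rate computation entirely.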
+  rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
+  if (args->this_rd + rd > args->best_rd) {
+    args->exit_early = 1;
+    return;
+  }
+
+  rate = rate_block(plane, block, blk_row, blk_col, tx_size, args);
+  rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
+  rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
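+  // rd1: code the residual; rd2: skip the coefficients (distortion is the
+  // full prediction SSE, no coefficient rate).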
+
+  // TODO(jingning): temporarily enabled only for luma component
+  rd = VPXMIN(rd1, rd2);
+
+  args->this_rate += rate;
+  args->this_dist += dist;
+  args->this_sse += sse;
+  args->this_rd += rd;
+
+  if (args->this_rd > args->best_rd) {
+    args->exit_early = 1;
+    return;
+  }
+
+  args->skippable &= !x->plane[plane].eobs[block];
+}
+
+static void txfm_rd_in_plane(MACROBLOCK *x, const VP10_COMP *cpi, int *rate,
+                             int64_t *distortion, int *skippable, int64_t *sse,
+                             int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
+                             TX_SIZE tx_size, int use_fast_coef_casting) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  TX_TYPE tx_type;
+  struct rdcost_block_args args;
+  vp10_zero(args);
+  args.x = x;
+  args.cpi = cpi;
+  args.best_rd = ref_best_rd;
+  args.use_fast_coef_costing = use_fast_coef_casting;
+  args.skippable = 1;
+
+  if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
+
+  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+
+  tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
+  args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+
+  vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+                                          &args);
+  if (args.exit_early) {
+    *rate = INT_MAX;
+    *distortion = INT64_MAX;
+    *sse = INT64_MAX;
+    *skippable = 0;
+  } else {
+    *distortion = args.this_dist;
+    *rate = args.this_rate;
+    *sse = args.this_sse;
+    *skippable = args.skippable;
+  }
+}
+
+#if CONFIG_SUPERTX
+void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
+                                   int *rate, int64_t *distortion,
+                                   int *skippable, int64_t *sse,
+                                   int64_t ref_best_rd, int plane,
+                                   BLOCK_SIZE bsize, TX_SIZE tx_size,
+                                   int use_fast_coef_casting) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  struct rdcost_block_args args;
+  TX_TYPE tx_type;
+
+  vp10_zero(args);
+  args.cpi = cpi;
+  args.x = x;
+  args.best_rd = ref_best_rd;
+  args.use_fast_coef_costing = use_fast_coef_casting;
+
+#if CONFIG_EXT_TX
+  assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
+  if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
+
+  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+
+  tx_type = get_tx_type(pd->plane_type, xd, 0, tx_size);
+  args.so = get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+
+  block_rd_txfm(plane, 0, 0, 0, get_plane_block_size(bsize, pd), tx_size,
+                &args);
+
+  if (args.exit_early) {
+    *rate = INT_MAX;
+    *distortion = INT64_MAX;
+    *sse = INT64_MAX;
+    *skippable = 0;
+  } else {
+    *distortion = args.this_dist;
+    *rate = args.this_rate;
+    *sse = args.this_sse;
+    *skippable = !x->plane[plane].eobs[0];
+  }
+}
+#endif  // CONFIG_SUPERTX
+
+static int64_t txfm_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *r, int64_t *d,
+                        int *s, int64_t *sse, int64_t ref_best_rd,
+                        BLOCK_SIZE bs, TX_TYPE tx_type, int tx_size) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int64_t rd = INT64_MAX;
+  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
+  int s0, s1;
+  const int is_inter = is_inter_block(mbmi);
+  const int tx_size_ctx = get_tx_size_context(xd);
+  const int tx_size_cat =
+      is_inter ? inter_tx_size_cat_lookup[bs] : intra_tx_size_cat_lookup[bs];
+  const TX_SIZE coded_tx_size = txsize_sqr_up_map[tx_size];
+  const int tx_select = cm->tx_mode == TX_MODE_SELECT;
+  const int r_tx_size =
+      cpi->tx_size_cost[tx_size_cat][tx_size_ctx][coded_tx_size];
+
+  assert(skip_prob > 0);
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+  assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed_bsize(bs)));
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+
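+  // Bit costs of signaling skip = 0 (s0) and skip = 1 (s1).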
+  s0 = vp10_cost_bit(skip_prob, 0);
+  s1 = vp10_cost_bit(skip_prob, 1);
+
+  mbmi->tx_type = tx_type;
+  mbmi->tx_size = tx_size;
+  txfm_rd_in_plane(x, cpi, r, d, s, sse, ref_best_rd, 0, bs, tx_size,
+                   cpi->sf.use_fast_coef_costing);
+  if (*r == INT_MAX) return INT64_MAX;
+#if CONFIG_EXT_TX
+  if (get_ext_tx_types(tx_size, bs, is_inter) > 1 &&
+      !xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+    const int ext_tx_set = get_ext_tx_set(tx_size, bs, is_inter);
+    if (is_inter) {
+      if (ext_tx_set > 0)
+        *r += cpi->inter_tx_type_costs
+                  [ext_tx_set][txsize_sqr_map[mbmi->tx_size]][mbmi->tx_type];
+    } else {
+      if (ext_tx_set > 0 && ALLOW_INTRA_EXT_TX)
+        *r += cpi->intra_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->mode]
+                                      [mbmi->tx_type];
+    }
+  }
+#else
+  if (tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
+      !FIXED_TX_TYPE) {
+    if (is_inter) {
+      *r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
+    } else {
+      *r += cpi->intra_tx_type_costs[mbmi->tx_size]
+                                    [intra_mode_to_tx_type_context[mbmi->mode]]
+                                    [mbmi->tx_type];
+    }
+  }
+#endif  // CONFIG_EXT_TX
+
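+  // For skippable blocks only the skip flag (plus tx_size, when selectable
+  // for intra) is coded and distortion is the full SSE.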
+  if (*s) {
+    if (is_inter) {
+      rd = RDCOST(x->rdmult, x->rddiv, s1, *sse);
+    } else {
+      rd = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, *sse);
+    }
+  } else {
+    rd = RDCOST(x->rdmult, x->rddiv, *r + s0 + r_tx_size * tx_select, *d);
+  }
+
+  if (tx_select) *r += r_tx_size;
+
+  if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*s))
+    rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+
+  return rd;
+}
+
+static int64_t choose_tx_size_fix_type(VP10_COMP *cpi, BLOCK_SIZE bs,
+                                       MACROBLOCK *x, int *rate,
+                                       int64_t *distortion, int *skip,
+                                       int64_t *psse, int64_t ref_best_rd,
+                                       TX_TYPE tx_type, int prune) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int r, s;
+  int64_t d, sse;
+  int64_t rd = INT64_MAX;
+  int n;
+  int start_tx, end_tx;
+  int64_t best_rd = INT64_MAX, last_rd = INT64_MAX;
+  const TX_SIZE max_tx_size = max_txsize_lookup[bs];
+  TX_SIZE best_tx_size = max_tx_size;
+  const int tx_select = cm->tx_mode == TX_MODE_SELECT;
+  const int is_inter = is_inter_block(mbmi);
+#if CONFIG_EXT_TX
+#if CONFIG_RECT_TX
+  int evaluate_rect_tx = 0;
+#endif  // CONFIG_RECT_TX
+  int ext_tx_set;
+#endif  // CONFIG_EXT_TX
+
+  if (tx_select) {
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    evaluate_rect_tx = is_rect_tx_allowed(mbmi);
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+    start_tx = max_tx_size;
+    end_tx = 0;
+  } else {
+    const TX_SIZE chosen_tx_size =
+        tx_size_from_tx_mode(bs, cm->tx_mode, is_inter);
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    evaluate_rect_tx = is_rect_tx(chosen_tx_size);
+    assert(IMPLIES(evaluate_rect_tx, is_rect_tx_allowed(mbmi)));
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+    start_tx = chosen_tx_size;
+    end_tx = chosen_tx_size;
+  }
+
+  *distortion = INT64_MAX;
+  *rate = INT_MAX;
+  *skip = 0;
+  *psse = INT64_MAX;
+
+  mbmi->tx_type = tx_type;
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+  if (evaluate_rect_tx) {
+    const TX_SIZE rect_tx_size = max_txsize_rect_lookup[bs];
+    const int ext_tx_set = get_ext_tx_set(rect_tx_size, bs, 1);
+    if (ext_tx_used_inter[ext_tx_set][tx_type]) {
+      rd = txfm_yrd(cpi, x, &r, &d, &s, &sse, ref_best_rd, bs, tx_type,
+                    rect_tx_size);
+      best_tx_size = rect_tx_size;
+      best_rd = rd;
+      *distortion = d;
+      *rate = r;
+      *skip = s;
+      *psse = sse;
+    }
+  }
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+
+  last_rd = INT64_MAX;
+  for (n = start_tx; n >= end_tx; --n) {
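+    // Walk square tx sizes from largest (start_tx) down to end_tx, skipping
+    // sizes this tx_type cannot be used with or that pruning rejects.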
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+    if (is_rect_tx(n)) break;
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+    if (FIXED_TX_TYPE && tx_type != get_default_tx_type(0, xd, 0, n)) continue;
+    if (!is_inter && x->use_default_intra_tx_type &&
+        tx_type != get_default_tx_type(0, xd, 0, n))
+      continue;
+    if (is_inter && x->use_default_inter_tx_type &&
+        tx_type != get_default_tx_type(0, xd, 0, n))
+      continue;
+    if (max_tx_size == TX_32X32 && n == TX_4X4) continue;
+#if CONFIG_EXT_TX
+    ext_tx_set = get_ext_tx_set(n, bs, is_inter);
+    if (is_inter) {
+      if (!ext_tx_used_inter[ext_tx_set][tx_type]) continue;
+      if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE) {
+        if (!do_tx_type_search(tx_type, prune)) continue;
+      }
+    } else {
+      if (!ALLOW_INTRA_EXT_TX && bs >= BLOCK_8X8) {
+        if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) continue;
+      }
+      if (!ext_tx_used_intra[ext_tx_set][tx_type]) continue;
+    }
+#else   // CONFIG_EXT_TX
+    if (n >= TX_32X32 && tx_type != DCT_DCT) continue;
+    if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
+        !do_tx_type_search(tx_type, prune))
+      continue;
+#endif  // CONFIG_EXT_TX
+
+    rd = txfm_yrd(cpi, x, &r, &d, &s, &sse, ref_best_rd, bs, tx_type, n);
+
+    // Early termination in transform size search.
+    if (cpi->sf.tx_size_search_breakout &&
+        (rd == INT64_MAX || (s == 1 && tx_type != DCT_DCT && n < start_tx) ||
+         (n < (int)max_tx_size && rd > last_rd)))
+      break;
+
+    last_rd = rd;
+    if (rd < best_rd) {
+      best_tx_size = n;
+      best_rd = rd;
+      *distortion = d;
+      *rate = r;
+      *skip = s;
+      *psse = sse;
+    }
+  }
+  mbmi->tx_size = best_tx_size;
+
+  return best_rd;
+}
+
+#if CONFIG_EXT_INTER
+static int64_t estimate_yrd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bs, MACROBLOCK *x,
+                                   int *r, int64_t *d, int *s, int64_t *sse,
+                                   int64_t ref_best_rd) {
+  return txfm_yrd(cpi, x, r, d, s, sse, ref_best_rd, bs, DCT_DCT,
+                  max_txsize_lookup[bs]);
+}
+#endif  // CONFIG_EXT_INTER
+
+static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                   int64_t *distortion, int *skip, int64_t *sse,
+                                   int64_t ref_best_rd, BLOCK_SIZE bs) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  TX_TYPE tx_type, best_tx_type = DCT_DCT;
+  int r, s;
+  int64_t d, psse, this_rd, best_rd = INT64_MAX;
+  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
+  int s0 = vp10_cost_bit(skip_prob, 0);
+  int s1 = vp10_cost_bit(skip_prob, 1);
+  const int is_inter = is_inter_block(mbmi);
+  int prune = 0;
+#if CONFIG_EXT_TX
+  int ext_tx_set;
+#endif  // CONFIG_EXT_TX
+
+  mbmi->tx_size = tx_size_from_tx_mode(bs, cm->tx_mode, is_inter);
+
+#if CONFIG_EXT_TX
+  ext_tx_set = get_ext_tx_set(mbmi->tx_size, bs, is_inter);
+#endif  // CONFIG_EXT_TX
+
+  if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE)
+#if CONFIG_EXT_TX
+    prune = prune_tx_types(cpi, bs, x, xd, ext_tx_set);
+#else
+    prune = prune_tx_types(cpi, bs, x, xd, 0);
+#endif
+#if CONFIG_EXT_TX
+  if (get_ext_tx_types(mbmi->tx_size, bs, is_inter) > 1 &&
+      !xd->lossless[mbmi->segment_id]) {
+    for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
+      if (is_inter) {
+        if (x->use_default_inter_tx_type &&
+            tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
+          continue;
+        if (!ext_tx_used_inter[ext_tx_set][tx_type]) continue;
+        if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE) {
+          if (!do_tx_type_search(tx_type, prune)) continue;
+        }
+      } else {
+        if (x->use_default_intra_tx_type &&
+            tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
+          continue;
+        if (!ALLOW_INTRA_EXT_TX && bs >= BLOCK_8X8) {
+          if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) continue;
+        }
+        if (!ext_tx_used_intra[ext_tx_set][tx_type]) continue;
+      }
+
+      mbmi->tx_type = tx_type;
+
+      txfm_rd_in_plane(x, cpi, &r, &d, &s, &psse, ref_best_rd, 0, bs,
+                       mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+
+      if (r == INT_MAX) continue;
+      if (get_ext_tx_types(mbmi->tx_size, bs, is_inter) > 1) {
+        if (is_inter) {
+          if (ext_tx_set > 0)
+            r += cpi->inter_tx_type_costs[ext_tx_set][mbmi->tx_size]
+                                         [mbmi->tx_type];
+        } else {
+          if (ext_tx_set > 0 && ALLOW_INTRA_EXT_TX)
+            r += cpi->intra_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->mode]
+                                         [mbmi->tx_type];
+        }
+      }
+
+      if (s)
+        this_rd = RDCOST(x->rdmult, x->rddiv, s1, psse);
+      else
+        this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
+      if (is_inter_block(mbmi) && !xd->lossless[mbmi->segment_id] && !s)
+        this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+
+      if (this_rd < best_rd) {
+        best_rd = this_rd;
+        best_tx_type = mbmi->tx_type;
+      }
+    }
+  }
+
+#else   // CONFIG_EXT_TX
+  if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id]) {
+    for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
+      if (!is_inter && x->use_default_intra_tx_type &&
+          tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
+        continue;
+      if (is_inter && x->use_default_inter_tx_type &&
+          tx_type != get_default_tx_type(0, xd, 0, mbmi->tx_size))
+        continue;
+      mbmi->tx_type = tx_type;
+      txfm_rd_in_plane(x, cpi, &r, &d, &s, &psse, ref_best_rd, 0, bs,
+                       mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+      if (r == INT_MAX) continue;
+      if (is_inter) {
+        r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
+        if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
+            !do_tx_type_search(tx_type, prune))
+          continue;
+      } else {
+        r += cpi->intra_tx_type_costs[mbmi->tx_size]
+                                     [intra_mode_to_tx_type_context[mbmi->mode]]
+                                     [mbmi->tx_type];
+      }
+      if (s)
+        this_rd = RDCOST(x->rdmult, x->rddiv, s1, psse);
+      else
+        this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d);
+      if (is_inter && !xd->lossless[mbmi->segment_id] && !s)
+        this_rd = VPXMIN(this_rd, RDCOST(x->rdmult, x->rddiv, s1, psse));
+
+      if (this_rd < best_rd) {
+        best_rd = this_rd;
+        best_tx_type = mbmi->tx_type;
+      }
+    }
+  }
+#endif  // CONFIG_EXT_TX
+  mbmi->tx_type = best_tx_type;
+
+  txfm_rd_in_plane(x, cpi, rate, distortion, skip, sse, ref_best_rd, 0, bs,
+                   mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+}
+
+static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                    int64_t *distortion, int *skip,
+                                    int64_t *sse, int64_t ref_best_rd,
+                                    BLOCK_SIZE bs) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+
+  mbmi->tx_size = TX_4X4;
+  mbmi->tx_type = DCT_DCT;
+
+  txfm_rd_in_plane(x, cpi, rate, distortion, skip, sse, ref_best_rd, 0, bs,
+                   mbmi->tx_size, cpi->sf.use_fast_coef_costing);
+}
+
+static void choose_tx_size_type_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
+                                        int *rate, int64_t *distortion,
+                                        int *skip, int64_t *psse,
+                                        int64_t ref_best_rd, BLOCK_SIZE bs) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int r, s;
+  int64_t d, sse;
+  int64_t rd = INT64_MAX;
+  int64_t best_rd = INT64_MAX;
+  TX_SIZE best_tx = max_txsize_lookup[bs];
+  const int is_inter = is_inter_block(mbmi);
+  TX_TYPE tx_type, best_tx_type = DCT_DCT;
+  int prune = 0;
+
+  if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE)
+    // Passing -1 in for tx_type indicates that all 1D transforms should be
+    // considered for pruning.
+    prune = prune_tx_types(cpi, bs, x, xd, -1);
+
+  *distortion = INT64_MAX;
+  *rate = INT_MAX;
+  *skip = 0;
+  *psse = INT64_MAX;
+
+  for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) {
+#if CONFIG_REF_MV
+    if (tx_type != DCT_DCT && is_inter && mbmi->ref_mv_idx > 0) continue;
+#endif
+    rd = choose_tx_size_fix_type(cpi, bs, x, &r, &d, &s, &sse, ref_best_rd,
+                                 tx_type, prune);
+    if (rd < best_rd) {
+      best_rd = rd;
+      *distortion = d;
+      *rate = r;
+      *skip = s;
+      *psse = sse;
+      best_tx_type = tx_type;
+      best_tx = mbmi->tx_size;
+    }
+  }
+
+  mbmi->tx_size = best_tx;
+  mbmi->tx_type = best_tx_type;
+
+#if !CONFIG_EXT_TX
+  if (mbmi->tx_size >= TX_32X32) assert(mbmi->tx_type == DCT_DCT);
+#endif
+}
+
+static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                            int64_t *distortion, int *skip, int64_t *psse,
+                            BLOCK_SIZE bs, int64_t ref_best_rd) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  int64_t sse;
+  int64_t *ret_sse = psse ? psse : &sse;
+
+  assert(bs == xd->mi[0]->mbmi.sb_type);
+
+  if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+    choose_smallest_tx_size(cpi, x, rate, distortion, skip, ret_sse,
+                            ref_best_rd, bs);
+  } else if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
+    choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
+                           bs);
+  } else {
+    choose_tx_size_type_from_rd(cpi, x, rate, distortion, skip, ret_sse,
+                                ref_best_rd, bs);
+  }
+}
+
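+// Returns 1 (skip) for an oblique mode when the best mode so far is not one
+// of its two neighboring directional modes.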
+static int conditional_skipintra(PREDICTION_MODE mode,
+                                 PREDICTION_MODE best_intra_mode) {
+  if (mode == D117_PRED && best_intra_mode != V_PRED &&
+      best_intra_mode != D135_PRED)
+    return 1;
+  if (mode == D63_PRED && best_intra_mode != V_PRED &&
+      best_intra_mode != D45_PRED)
+    return 1;
+  if (mode == D207_PRED && best_intra_mode != H_PRED &&
+      best_intra_mode != D45_PRED)
+    return 1;
+  if (mode == D153_PRED && best_intra_mode != H_PRED &&
+      best_intra_mode != D135_PRED)
+    return 1;
+  return 0;
+}
+
+static int rd_pick_palette_intra_sby(
+    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int palette_ctx,
+    int dc_mode_cost, PALETTE_MODE_INFO *palette_mode_info,
+    uint8_t *best_palette_color_map, TX_SIZE *best_tx, TX_TYPE *best_tx_type,
+    PREDICTION_MODE *mode_selected, int64_t *best_rd) {
+  int rate_overhead = 0;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *const mic = xd->mi[0];
+  const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+  const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+  int this_rate, this_rate_tokenonly, s, colors, n;
+  int64_t this_distortion, this_rd;
+  const int src_stride = x->plane[0].src.stride;
+  const uint8_t *const src = x->plane[0].src.buf;
+
+  assert(cpi->common.allow_screen_content_tools);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (cpi->common.use_highbitdepth)
+    colors = vp10_count_colors_highbd(src, src_stride, rows, cols,
+                                      cpi->common.bit_depth);
+  else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    colors = vp10_count_colors(src, src_stride, rows, cols);
+  palette_mode_info->palette_size[0] = 0;
+#if CONFIG_EXT_INTRA
+  mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+#endif  // CONFIG_EXT_INTRA
+
+  if (colors > 1 && colors <= 64) {
+    int r, c, i, j, k;
+    const int max_itr = 50;
+    int color_ctx, color_idx = 0;
+    int color_order[PALETTE_MAX_SIZE];
+    float *const data = x->palette_buffer->kmeans_data_buf;
+    float centroids[PALETTE_MAX_SIZE];
+    uint8_t *const color_map = xd->plane[0].color_index_map;
+    float lb, ub, val;
+    MB_MODE_INFO *const mbmi = &mic->mbmi;
+    PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+#if CONFIG_VP9_HIGHBITDEPTH
+    uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
+    if (cpi->common.use_highbitdepth)
+      lb = ub = src16[0];
+    else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      lb = ub = src[0];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (cpi->common.use_highbitdepth) {
+      for (r = 0; r < rows; ++r) {
+        for (c = 0; c < cols; ++c) {
+          val = src16[r * src_stride + c];
+          data[r * cols + c] = val;
+          if (val < lb)
+            lb = val;
+          else if (val > ub)
+            ub = val;
+        }
+      }
+    } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      for (r = 0; r < rows; ++r) {
+        for (c = 0; c < cols; ++c) {
+          val = src[r * src_stride + c];
+          data[r * cols + c] = val;
+          if (val < lb)
+            lb = val;
+          else if (val > ub)
+            ub = val;
+        }
+      }
+#if CONFIG_VP9_HIGHBITDEPTH
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    mbmi->mode = DC_PRED;
+#if CONFIG_EXT_INTRA
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+#endif  // CONFIG_EXT_INTRA
+
+    if (rows * cols > PALETTE_MAX_BLOCK_SIZE) return 0;
+
+    for (n = colors > PALETTE_MAX_SIZE ? PALETTE_MAX_SIZE : colors; n >= 2;
+         --n) {
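+      // Seed k-means with n centroids evenly spaced over [lb, ub]; duplicate
+      // centroids are removed after convergence.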
+      for (i = 0; i < n; ++i)
+        centroids[i] = lb + (2 * i + 1) * (ub - lb) / n / 2;
+      vp10_k_means(data, centroids, color_map, rows * cols, n, 1, max_itr);
+      k = vp10_remove_duplicates(centroids, n);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cpi->common.use_highbitdepth)
+        for (i = 0; i < k; ++i)
+          pmi->palette_colors[i] =
+              clip_pixel_highbd((int)centroids[i], cpi->common.bit_depth);
+      else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        for (i = 0; i < k; ++i)
+          pmi->palette_colors[i] = clip_pixel((int)centroids[i]);
+      pmi->palette_size[0] = k;
+
+      vp10_calc_indices(data, centroids, color_map, rows * cols, k, 1);
+
+      super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                      bsize, *best_rd);
+      if (this_rate_tokenonly == INT_MAX) continue;
+
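+      // Rate: token rate + DC mode cost + bit_depth bits per palette color
+      // + palette size + first color index + the palette-mode flag.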
+      this_rate =
+          this_rate_tokenonly + dc_mode_cost +
+          cpi->common.bit_depth * k * vp10_cost_bit(128, 0) +
+          cpi->palette_y_size_cost[bsize - BLOCK_8X8][k - 2] +
+          write_uniform_cost(k, color_map[0]) +
+          vp10_cost_bit(
+              vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
+              1);
+      for (i = 0; i < rows; ++i) {
+        for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
+          color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, k,
+                                                     color_order);
+          for (r = 0; r < k; ++r)
+            if (color_map[i * cols + j] == color_order[r]) {
+              color_idx = r;
+              break;
+            }
+          assert(color_idx >= 0 && color_idx < k);
+          this_rate += cpi->palette_y_color_cost[k - 2][color_ctx][color_idx];
+        }
+      }
+      this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+      if (this_rd < *best_rd) {
+        *best_rd = this_rd;
+        *palette_mode_info = *pmi;
+        memcpy(best_palette_color_map, color_map,
+               rows * cols * sizeof(color_map[0]));
+        *mode_selected = DC_PRED;
+        *best_tx = mbmi->tx_size;
+        *best_tx_type = mbmi->tx_type;
+        rate_overhead = this_rate - this_rate_tokenonly;
+      }
+    }
+  }
+  return rate_overhead;
+}
+
+static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+                                     int col, PREDICTION_MODE *best_mode,
+                                     const int *bmode_costs, ENTROPY_CONTEXT *a,
+                                     ENTROPY_CONTEXT *l, int *bestrate,
+                                     int *bestratey, int64_t *bestdistortion,
+                                     BLOCK_SIZE bsize, int64_t rd_thresh) {
+  PREDICTION_MODE mode;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int64_t best_rd = rd_thresh;
+  struct macroblock_plane *p = &x->plane[0];
+  struct macroblockd_plane *pd = &xd->plane[0];
+  const int src_stride = p->src.stride;
+  const int dst_stride = pd->dst.stride;
+  const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
+  uint8_t *dst_init = &pd->dst.buf[row * 4 * dst_stride + col * 4];
+  ENTROPY_CONTEXT ta[2], tempa[2];
+  ENTROPY_CONTEXT tl[2], templ[2];
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+  int idx, idy;
+  uint8_t best_dst[8 * 8];
+#if CONFIG_VP9_HIGHBITDEPTH
+  uint16_t best_dst16[8 * 8];
+#endif
+
+  memcpy(ta, a, num_4x4_blocks_wide * sizeof(a[0]));
+  memcpy(tl, l, num_4x4_blocks_high * sizeof(l[0]));
+  xd->mi[0]->mbmi.tx_size = TX_4X4;
+  xd->mi[0]->mbmi.palette_mode_info.palette_size[0] = 0;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+      int64_t this_rd;
+      int ratey = 0;
+      int64_t distortion = 0;
+      int rate = bmode_costs[mode];
+
+      if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
+
+      // Only do the oblique modes if the best so far is
+      // one of the neighboring directional modes
+      if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
+        if (conditional_skipintra(mode, *best_mode)) continue;
+      }
+
+      memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
+      memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
+
+      for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
+        for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
+          const int block = (row + idy) * 2 + (col + idx);
+          const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
+          uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
+          int16_t *const src_diff =
+              vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+          xd->mi[0]->bmi[block].as_mode = mode;
+          vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                   dst_stride, col + idx, row + idy, 0);
+          vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+                                    dst_stride, xd->bd);
+          if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+            TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+            const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+#if CONFIG_VAR_TX | CONFIG_NEW_QUANT
+            const int coeff_ctx =
+                combine_entropy_contexts(*(tempa + idx), *(templ + idy));
+#endif  // CONFIG_VAR_TX | CONFIG_NEW_QUANT
+#if CONFIG_NEW_QUANT
+            vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
+                                    BLOCK_8X8, TX_4X4, coeff_ctx);
+#else
+            vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                             TX_4X4, VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+#if CONFIG_VAR_TX
+            ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
+                                 so->neighbors, cpi->sf.use_fast_coef_costing);
+            *(tempa + idx) = !(p->eobs[block] == 0);
+            *(templ + idy) = !(p->eobs[block] == 0);
+#else
+            ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+                                 so->scan, so->neighbors,
+                                 cpi->sf.use_fast_coef_costing);
+#endif  // CONFIG_VAR_TX
+            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
+              goto next_highbd;
+            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                         dst_stride, p->eobs[block], xd->bd,
+                                         DCT_DCT, 1);
+          } else {
+            int64_t dist;
+            unsigned int tmp;
+            TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+            const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+            const int coeff_ctx =
+                combine_entropy_contexts(*(tempa + idx), *(templ + idy));
+#if CONFIG_NEW_QUANT
+            vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx,
+                                    BLOCK_8X8, TX_4X4, coeff_ctx);
+#else
+            vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                             TX_4X4, VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+            vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+#if CONFIG_VAR_TX
+            ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
+                                 so->neighbors, cpi->sf.use_fast_coef_costing);
+            *(tempa + idx) = !(p->eobs[block] == 0);
+            *(templ + idy) = !(p->eobs[block] == 0);
+#else
+            ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+                                 so->scan, so->neighbors,
+                                 cpi->sf.use_fast_coef_costing);
+#endif  // CONFIG_VAR_TX
+            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                         dst_stride, p->eobs[block], xd->bd,
+                                         tx_type, 0);
+            cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
+            dist = (int64_t)tmp << 4;
+            distortion += dist;
+            if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
+              goto next_highbd;
+          }
+        }
+      }
+
+      rate += ratey;
+      this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+      if (this_rd < best_rd) {
+        *bestrate = rate;
+        *bestratey = ratey;
+        *bestdistortion = distortion;
+        best_rd = this_rd;
+        *best_mode = mode;
+        memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
+        memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
+        for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
+          memcpy(best_dst16 + idy * 8,
+                 CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+                 num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+        }
+      }
+    next_highbd : {}
+    }
+
+    if (best_rd >= rd_thresh) return best_rd;
+
+    for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
+      memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
+             best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+    }
+
+    return best_rd;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+    int64_t this_rd;
+    int ratey = 0;
+    int64_t distortion = 0;
+    int rate = bmode_costs[mode];
+
+    if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
+
+    // Only do the oblique modes if the best so far is
+    // one of the neighboring directional modes
+    if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
+      if (conditional_skipintra(mode, *best_mode)) continue;
+    }
+
+    memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
+    memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
+
+    for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
+      for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
+        const int block = (row + idy) * 2 + (col + idx);
+        const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
+        uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
+        int16_t *const src_diff =
+            vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+        xd->mi[0]->bmi[block].as_mode = mode;
+        vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+                                 dst_stride, col + idx, row + idy, 0);
+        vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
+
+        if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+          TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+          const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+#if CONFIG_VAR_TX | CONFIG_NEW_QUANT
+          const int coeff_ctx =
+              combine_entropy_contexts(*(tempa + idx), *(templ + idy));
+#endif  // CONFIG_VAR_TX | CONFIG_NEW_QUANT
+#if CONFIG_NEW_QUANT
+          vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                  TX_4X4, coeff_ctx);
+#else
+          vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+                           VP10_XFORM_QUANT_B);
+#endif  // CONFIG_NEW_QUANT
+#if CONFIG_VAR_TX
+          ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
+                               so->neighbors, cpi->sf.use_fast_coef_costing);
+          *(tempa + idx) = !(p->eobs[block] == 0);
+          *(templ + idy) = !(p->eobs[block] == 0);
+#else
+          ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+                               so->scan, so->neighbors,
+                               cpi->sf.use_fast_coef_costing);
+#endif
+          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
+            goto next;
+          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                dst_stride, p->eobs[block], DCT_DCT, 1);
+        } else {
+          int64_t dist;
+          unsigned int tmp;
+          TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block, TX_4X4);
+          const scan_order *so = get_scan(TX_4X4, tx_type, 0);
+          const int coeff_ctx =
+              combine_entropy_contexts(*(tempa + idx), *(templ + idy));
+#if CONFIG_NEW_QUANT
+          vp10_xform_quant_fp_nuq(x, 0, block, row + idy, col + idx, BLOCK_8X8,
+                                  TX_4X4, coeff_ctx);
+#else
+          vp10_xform_quant(x, 0, block, row + idy, col + idx, BLOCK_8X8, TX_4X4,
+                           VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+          vp10_optimize_b(x, 0, block, TX_4X4, coeff_ctx);
+#if CONFIG_VAR_TX
+          ratey += cost_coeffs(x, 0, block, coeff_ctx, TX_4X4, so->scan,
+                               so->neighbors, cpi->sf.use_fast_coef_costing);
+          *(tempa + idx) = !(p->eobs[block] == 0);
+          *(templ + idy) = !(p->eobs[block] == 0);
+#else
+          ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
+                               so->scan, so->neighbors,
+                               cpi->sf.use_fast_coef_costing);
+#endif
+          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+                                dst_stride, p->eobs[block], tx_type, 0);
+          cpi->fn_ptr[BLOCK_4X4].vf(src, src_stride, dst, dst_stride, &tmp);
+          dist = (int64_t)tmp << 4;
+          distortion += dist;
+          // Pixel-domain distortion requires the reconstruction above, so
+          // this early-exit check must come after the inverse transform.
+          // Compared with frequency-domain distortion, the extra encoding
+          // effort is small.
+          if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
+            goto next;
+        }
+      }
+    }
+
+    rate += ratey;
+    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+
+    if (this_rd < best_rd) {
+      *bestrate = rate;
+      *bestratey = ratey;
+      *bestdistortion = distortion;
+      best_rd = this_rd;
+      *best_mode = mode;
+      memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
+      memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
+      for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
+        memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
+               num_4x4_blocks_wide * 4);
+    }
+  next : {}
+  }
+
+  if (best_rd >= rd_thresh) return best_rd;
+
+  for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
+    memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
+           num_4x4_blocks_wide * 4);
+
+  return best_rd;
+}
+
+static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
+                                            int *rate, int *rate_y,
+                                            int64_t *distortion,
+                                            int64_t best_rd) {
+  int i, j;
+  const MACROBLOCKD *const xd = &mb->e_mbd;
+  MODE_INFO *const mic = xd->mi[0];
+  const MODE_INFO *above_mi = xd->above_mi;
+  const MODE_INFO *left_mi = xd->left_mi;
+  const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+  int idx, idy;
+  int cost = 0;
+  int64_t total_distortion = 0;
+  int tot_rate_y = 0;
+  int64_t total_rd = 0;
+  const int *bmode_costs = cpi->mbmode_cost[0];
+
+#if CONFIG_EXT_INTRA
+  mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+  mic->mbmi.intra_filter = INTRA_FILTER_LINEAR;
+#endif  // CONFIG_EXT_INTRA
+
+  // TODO(any): Add search of the tx_type to improve rd performance at the
+  // expense of speed.
+  mic->mbmi.tx_type = DCT_DCT;
+  mic->mbmi.tx_size = TX_4X4;
+
+  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
+  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+      PREDICTION_MODE best_mode = DC_PRED;
+      int r = INT_MAX, ry = INT_MAX;
+      int64_t d = INT64_MAX, this_rd = INT64_MAX;
+      i = idy * 2 + idx;
+      if (cpi->common.frame_type == KEY_FRAME) {
+        const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
+        const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
+
+        bmode_costs = cpi->y_mode_costs[A][L];
+      }
+
+      this_rd = rd_pick_intra4x4block(
+          cpi, mb, idy, idx, &best_mode, bmode_costs,
+          xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
+          &ry, &d, bsize, best_rd - total_rd);
+      if (this_rd >= best_rd - total_rd) return INT64_MAX;
+
+      total_rd += this_rd;
+      cost += r;
+      total_distortion += d;
+      tot_rate_y += ry;
+
+      mic->bmi[i].as_mode = best_mode;
+      for (j = 1; j < num_4x4_blocks_high; ++j)
+        mic->bmi[i + j * 2].as_mode = best_mode;
+      for (j = 1; j < num_4x4_blocks_wide; ++j)
+        mic->bmi[i + j].as_mode = best_mode;
+
+      if (total_rd >= best_rd) return INT64_MAX;
+    }
+  }
+  mic->mbmi.mode = mic->bmi[3].as_mode;
+
+  // Add in the cost of the transform type
+  if (!xd->lossless[mic->mbmi.segment_id]) {
+    int rate_tx_type = 0;
+#if CONFIG_EXT_TX
+    if (get_ext_tx_types(TX_4X4, bsize, 0) > 1) {
+      const int eset = get_ext_tx_set(TX_4X4, bsize, 0);
+      rate_tx_type = cpi->intra_tx_type_costs[eset][TX_4X4][mic->mbmi.mode]
+                                             [mic->mbmi.tx_type];
+    }
+#else
+    rate_tx_type = cpi->intra_tx_type_costs
+                       [TX_4X4][intra_mode_to_tx_type_context[mic->mbmi.mode]]
+                       [mic->mbmi.tx_type];
+#endif
+    assert(mic->mbmi.tx_size == TX_4X4);
+    cost += rate_tx_type;
+    tot_rate_y += rate_tx_type;
+  }
+
+  *rate = cost;
+  *rate_y = tot_rate_y;
+  *distortion = total_distortion;
+
+  return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
+}
+
+#if CONFIG_EXT_INTRA
+// Return 1 if an ext intra mode is selected; return 0 otherwise.
+static int rd_pick_ext_intra_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                 int *rate_tokenonly, int64_t *distortion,
+                                 int *skippable, BLOCK_SIZE bsize,
+                                 int mode_cost, int64_t *best_rd,
+                                 uint16_t skip_mask) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *const mic = xd->mi[0];
+  MB_MODE_INFO *mbmi = &mic->mbmi;
+  int this_rate, this_rate_tokenonly, s;
+  int ext_intra_selected_flag = 0;
+  int64_t this_distortion, this_rd;
+  EXT_INTRA_MODE mode;
+  TX_SIZE best_tx_size = TX_4X4;
+  EXT_INTRA_MODE_INFO ext_intra_mode_info;
+  TX_TYPE best_tx_type;
+
+  vp10_zero(ext_intra_mode_info);
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 1;
+  mbmi->mode = DC_PRED;
+  mbmi->palette_mode_info.palette_size[0] = 0;
+
+  for (mode = 0; mode < FILTER_INTRA_MODES; ++mode) {
+    if (skip_mask & (1 << mode)) continue;
+    mbmi->ext_intra_mode_info.ext_intra_mode[0] = mode;
+    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                    bsize, *best_rd);
+    if (this_rate_tokenonly == INT_MAX) continue;
+
+    this_rate = this_rate_tokenonly +
+                vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 1) +
+                write_uniform_cost(FILTER_INTRA_MODES, mode) + mode_cost;
+    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+    if (this_rd < *best_rd) {
+      *best_rd = this_rd;
+      best_tx_size = mic->mbmi.tx_size;
+      ext_intra_mode_info = mbmi->ext_intra_mode_info;
+      best_tx_type = mic->mbmi.tx_type;
+      *rate = this_rate;
+      *rate_tokenonly = this_rate_tokenonly;
+      *distortion = this_distortion;
+      *skippable = s;
+      ext_intra_selected_flag = 1;
+    }
+  }
+
+  if (ext_intra_selected_flag) {
+    mbmi->mode = DC_PRED;
+    mbmi->tx_size = best_tx_size;
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
+        ext_intra_mode_info.use_ext_intra_mode[0];
+    mbmi->ext_intra_mode_info.ext_intra_mode[0] =
+        ext_intra_mode_info.ext_intra_mode[0];
+    mbmi->tx_type = best_tx_type;
+    return 1;
+  } else {
+    return 0;
+  }
+}
+
+static void pick_intra_angle_routine_sby(
+    VP10_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly,
+    int64_t *distortion, int *skippable, int *best_angle_delta,
+    TX_SIZE *best_tx_size, TX_TYPE *best_tx_type, INTRA_FILTER *best_filter,
+    BLOCK_SIZE bsize, int rate_overhead, int64_t *best_rd) {
+  int this_rate, this_rate_tokenonly, s;
+  int64_t this_distortion, this_rd;
+  MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
+  super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                  bsize, *best_rd);
+  if (this_rate_tokenonly == INT_MAX) return;
+
+  this_rate = this_rate_tokenonly + rate_overhead;
+  this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+  if (this_rd < *best_rd) {
+    *best_rd = this_rd;
+    *best_angle_delta = mbmi->angle_delta[0];
+    *best_tx_size = mbmi->tx_size;
+    *best_filter = mbmi->intra_filter;
+    *best_tx_type = mbmi->tx_type;
+    *rate = this_rate;
+    *rate_tokenonly = this_rate_tokenonly;
+    *distortion = this_distortion;
+    *skippable = s;
+  }
+}
+
+static int64_t rd_pick_intra_angle_sby(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                       int *rate_tokenonly, int64_t *distortion,
+                                       int *skippable, BLOCK_SIZE bsize,
+                                       int rate_overhead, int64_t best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *const mic = xd->mi[0];
+  MB_MODE_INFO *mbmi = &mic->mbmi;
+  int this_rate, this_rate_tokenonly, s;
+  int angle_delta, best_angle_delta = 0, p_angle;
+  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  INTRA_FILTER filter, best_filter = INTRA_FILTER_LINEAR;
+  const double rd_adjust = 1.2;
+  int64_t this_distortion, this_rd;
+  TX_SIZE best_tx_size = mic->mbmi.tx_size;
+  TX_TYPE best_tx_type = mbmi->tx_type;
+
+  if (ANGLE_FAST_SEARCH) {
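+    // Two-stage search: try the coarse deltas {0, -2, 2}, then refine
+    // around the best of the three with finer offsets.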
+    int deltas_level1[3] = { 0, -2, 2 };
+    int deltas_level2[3][2] = {
+      { -1, 1 }, { -3, -1 }, { 1, 3 },
+    };
+    const int level1 = 3, level2 = 2;
+    int i, j, best_i = -1;
+
+    for (i = 0; i < level1; ++i) {
+      mic->mbmi.angle_delta[0] = deltas_level1[i];
+      p_angle =
+          mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+      for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
+        int64_t tmp_best_rd;
+        if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+            filter != INTRA_FILTER_LINEAR)
+          continue;
+        mic->mbmi.intra_filter = filter;
+        tmp_best_rd =
+            (i == 0 && filter == INTRA_FILTER_LINEAR && best_rd < INT64_MAX)
+                ? (int64_t)(best_rd * rd_adjust)
+                : best_rd;
+        super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                        NULL, bsize, tmp_best_rd);
+        if (this_rate_tokenonly == INT_MAX) {
+          if (i == 0 && filter == INTRA_FILTER_LINEAR)
+            return best_rd;
+          else
+            continue;
+        }
+        this_rate = this_rate_tokenonly + rate_overhead +
+                    cpi->intra_filter_cost[intra_filter_ctx][filter];
+        this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+        if (i == 0 && filter == INTRA_FILTER_LINEAR && best_rd < INT64_MAX &&
+            this_rd > best_rd * rd_adjust)
+          return best_rd;
+        if (this_rd < best_rd) {
+          best_i = i;
+          best_rd = this_rd;
+          best_angle_delta = mbmi->angle_delta[0];
+          best_tx_size = mbmi->tx_size;
+          best_filter = mbmi->intra_filter;
+          best_tx_type = mbmi->tx_type;
+          *rate = this_rate;
+          *rate_tokenonly = this_rate_tokenonly;
+          *distortion = this_distortion;
+          *skippable = s;
+        }
+      }
+    }
+
+    if (best_i >= 0) {
+      for (j = 0; j < level2; ++j) {
+        mic->mbmi.angle_delta[0] = deltas_level2[best_i][j];
+        p_angle =
+            mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+        for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
+          mic->mbmi.intra_filter = filter;
+          if ((FILTER_FAST_SEARCH ||
+               !vp10_is_intra_filter_switchable(p_angle)) &&
+              filter != INTRA_FILTER_LINEAR)
+            continue;
+          pick_intra_angle_routine_sby(
+              cpi, x, rate, rate_tokenonly, distortion, skippable,
+              &best_angle_delta, &best_tx_size, &best_tx_type, &best_filter,
+              bsize,
+              rate_overhead + cpi->intra_filter_cost[intra_filter_ctx][filter],
+              &best_rd);
+        }
+      }
+    }
+  } else {
+    for (angle_delta = -MAX_ANGLE_DELTAS; angle_delta <= MAX_ANGLE_DELTAS;
+         ++angle_delta) {
+      mbmi->angle_delta[0] = angle_delta;
+      p_angle =
+          mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+      for (filter = INTRA_FILTER_LINEAR; filter < INTRA_FILTERS; ++filter) {
+        mic->mbmi.intra_filter = filter;
+        if ((FILTER_FAST_SEARCH || !vp10_is_intra_filter_switchable(p_angle)) &&
+            filter != INTRA_FILTER_LINEAR)
+          continue;
+        pick_intra_angle_routine_sby(
+            cpi, x, rate, rate_tokenonly, distortion, skippable,
+            &best_angle_delta, &best_tx_size, &best_tx_type, &best_filter,
+            bsize,
+            rate_overhead + cpi->intra_filter_cost[intra_filter_ctx][filter],
+            &best_rd);
+      }
+    }
+  }
+
+  if (FILTER_FAST_SEARCH && *rate_tokenonly < INT_MAX) {
+    mbmi->angle_delta[0] = best_angle_delta;
+    p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+    if (vp10_is_intra_filter_switchable(p_angle)) {
+      for (filter = INTRA_FILTER_LINEAR + 1; filter < INTRA_FILTERS; ++filter) {
+        mic->mbmi.intra_filter = filter;
+        pick_intra_angle_routine_sby(
+            cpi, x, rate, rate_tokenonly, distortion, skippable,
+            &best_angle_delta, &best_tx_size, &best_tx_type, &best_filter,
+            bsize,
+            rate_overhead + cpi->intra_filter_cost[intra_filter_ctx][filter],
+            &best_rd);
+      }
+    }
+  }
+
+  mbmi->tx_size = best_tx_size;
+  mbmi->angle_delta[0] = best_angle_delta;
+  mic->mbmi.intra_filter = best_filter;
+  mbmi->tx_type = best_tx_type;
+  return best_rd;
+}
+
+// Indexed by the sign bit, integer part and 1/16th fractional part of the
+// gradient ratio dx / dy.
+static const uint8_t gradient_to_angle_bin[2][7][16] = {
+  {
+      { 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0, 0, 0, 0 },
+      { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 },
+      { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+      { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+      { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+      { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
+      { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
+  },
+  {
+      { 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4 },
+      { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3 },
+      { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+      { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+      { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+      { 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
+      { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
+  },
+};
+
+static const uint8_t mode_to_angle_bin[INTRA_MODES] = {
+  0, 2, 6, 0, 4, 3, 5, 7, 1, 0,
+};
+
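+// Builds a histogram of squared gradient magnitudes over the directional
+// angle bins and flags, via directional_mode_skip_mask, every directional
+// mode whose neighborhood score is small relative to the total (see
+// ANGLE_SKIP_THRESH). Example: dx = 7, dy = 4 gives sn = 0, quot = 1 and
+// remd = (7 % 4) * 16 / 4 = 12, so the energy 7 * 7 + 4 * 4 = 65 is added to
+// hist[gradient_to_angle_bin[0][1][12]], i.e. hist[1].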
+static void angle_estimation(const uint8_t *src, int src_stride, int rows,
+                             int cols, uint8_t *directional_mode_skip_mask) {
+  int i, r, c, index, dx, dy, temp, sn, remd, quot;
+  uint64_t hist[DIRECTIONAL_MODES];
+  uint64_t hist_sum = 0;
+
+  memset(hist, 0, DIRECTIONAL_MODES * sizeof(hist[0]));
+  src += src_stride;
+  for (r = 1; r < rows; ++r) {
+    for (c = 1; c < cols; ++c) {
+      dx = src[c] - src[c - 1];
+      dy = src[c] - src[c - src_stride];
+      temp = dx * dx + dy * dy;
+      if (dy == 0) {
+        index = 2;
+      } else {
+        sn = (dx > 0) ^ (dy > 0);
+        dx = abs(dx);
+        dy = abs(dy);
+        remd = dx % dy;
+        quot = dx / dy;
+        remd = remd * 16 / dy;
+        index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+      }
+      hist[index] += temp;
+    }
+    src += src_stride;
+  }
+
+  for (i = 0; i < DIRECTIONAL_MODES; ++i) hist_sum += hist[i];
+  for (i = 0; i < INTRA_MODES; ++i) {
+    if (i != DC_PRED && i != TM_PRED) {
+      int index = mode_to_angle_bin[i];
+      uint64_t score = 2 * hist[index];
+      int weight = 2;
+      if (index > 0) {
+        score += hist[index - 1];
+        weight += 1;
+      }
+      if (index < DIRECTIONAL_MODES - 1) {
+        score += hist[index + 1];
+        weight += 1;
+      }
+      if (score * ANGLE_SKIP_THRESH < hist_sum * weight)
+        directional_mode_skip_mask[i] = 1;
+    }
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
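+// High bit-depth twin of angle_estimation(): the same logic applied to the
+// 16-bit samples behind CONVERT_TO_SHORTPTR().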
+static void highbd_angle_estimation(const uint8_t *src8, int src_stride,
+                                    int rows, int cols,
+                                    uint8_t *directional_mode_skip_mask) {
+  int i, r, c, index, dx, dy, temp, sn, remd, quot;
+  uint64_t hist[DIRECTIONAL_MODES];
+  uint64_t hist_sum = 0;
+  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+
+  memset(hist, 0, DIRECTIONAL_MODES * sizeof(hist[0]));
+  src += src_stride;
+  for (r = 1; r < rows; ++r) {
+    for (c = 1; c < cols; ++c) {
+      dx = src[c] - src[c - 1];
+      dy = src[c] - src[c - src_stride];
+      temp = dx * dx + dy * dy;
+      if (dy == 0) {
+        index = 2;
+      } else {
+        sn = (dx > 0) ^ (dy > 0);
+        dx = abs(dx);
+        dy = abs(dy);
+        remd = dx % dy;
+        quot = dx / dy;
+        remd = remd * 16 / dy;
+        index = gradient_to_angle_bin[sn][VPXMIN(quot, 6)][VPXMIN(remd, 15)];
+      }
+      hist[index] += temp;
+    }
+    src += src_stride;
+  }
+
+  for (i = 0; i < DIRECTIONAL_MODES; ++i) hist_sum += hist[i];
+  for (i = 0; i < INTRA_MODES; ++i) {
+    if (i != DC_PRED && i != TM_PRED) {
+      int index = mode_to_angle_bin[i];
+      uint64_t score = 2 * hist[index];
+      int weight = 2;
+      if (index > 0) {
+        score += hist[index - 1];
+        weight += 1;
+      }
+      if (index < DIRECTIONAL_MODES - 1) {
+        score += hist[index + 1];
+        weight += 1;
+      }
+      if (score * ANGLE_SKIP_THRESH < hist_sum * weight)
+        directional_mode_skip_mask[i] = 1;
+    }
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_EXT_INTRA
+
+// This function is used only for intra-only frames.
+static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                      int *rate_tokenonly, int64_t *distortion,
+                                      int *skippable, BLOCK_SIZE bsize,
+                                      int64_t best_rd) {
+  uint8_t mode_idx;
+  PREDICTION_MODE mode_selected = DC_PRED;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *const mic = xd->mi[0];
+  int this_rate, this_rate_tokenonly, s;
+  int64_t this_distortion, this_rd;
+  TX_SIZE best_tx = TX_4X4;
+#if CONFIG_EXT_INTRA
+  const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+  EXT_INTRA_MODE_INFO ext_intra_mode_info;
+  int is_directional_mode, rate_overhead, best_angle_delta = 0;
+  INTRA_FILTER best_filter = INTRA_FILTER_LINEAR;
+  uint8_t directional_mode_skip_mask[INTRA_MODES];
+  uint16_t filter_intra_mode_skip_mask = (1 << FILTER_INTRA_MODES) - 1;
+  const int src_stride = x->plane[0].src.stride;
+  const uint8_t *src = x->plane[0].src.buf;
+  int beat_best_rd = 0;
+#endif  // CONFIG_EXT_INTRA
+  TX_TYPE best_tx_type = DCT_DCT;
+  int *bmode_costs;
+  PALETTE_MODE_INFO palette_mode_info;
+  PALETTE_MODE_INFO *const pmi = &mic->mbmi.palette_mode_info;
+  uint8_t *best_palette_color_map =
+      cpi->common.allow_screen_content_tools
+          ? x->palette_buffer->best_palette_color_map
+          : NULL;
+  const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+  const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+  int palette_ctx = 0;
+  const MODE_INFO *above_mi = xd->above_mi;
+  const MODE_INFO *left_mi = xd->left_mi;
+  const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
+  const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
+  const PREDICTION_MODE FINAL_MODE_SEARCH = TM_PRED + 1;
+  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+  bmode_costs = cpi->y_mode_costs[A][L];
+
+#if CONFIG_EXT_INTRA
+  ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+  mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+  mic->mbmi.angle_delta[0] = 0;
+  memset(directional_mode_skip_mask, 0,
+         sizeof(directional_mode_skip_mask[0]) * INTRA_MODES);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+    highbd_angle_estimation(src, src_stride, rows, cols,
+                            directional_mode_skip_mask);
+  else
+#endif
+    angle_estimation(src, src_stride, rows, cols, directional_mode_skip_mask);
+#endif  // CONFIG_EXT_INTRA
+  palette_mode_info.palette_size[0] = 0;
+  pmi->palette_size[0] = 0;
+  if (above_mi)
+    palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+  if (left_mi)
+    palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+
+  if (cpi->sf.tx_type_search.fast_intra_tx_type_search)
+    x->use_default_intra_tx_type = 1;
+  else
+    x->use_default_intra_tx_type = 0;
+
+  /* Y Search for intra prediction mode */
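+  // FINAL_MODE_SEARCH adds one extra pass: if the fast default-tx-type search
+  // was used, the winning mode is re-evaluated with the full tx-type set.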
+  for (mode_idx = DC_PRED; mode_idx <= FINAL_MODE_SEARCH; ++mode_idx) {
+    if (mode_idx == FINAL_MODE_SEARCH) {
+      if (x->use_default_intra_tx_type == 0) break;
+      mic->mbmi.mode = mode_selected;
+      x->use_default_intra_tx_type = 0;
+    } else {
+      mic->mbmi.mode = mode_idx;
+    }
+#if CONFIG_EXT_INTRA
+    is_directional_mode =
+        (mic->mbmi.mode != DC_PRED && mic->mbmi.mode != TM_PRED);
+    if (is_directional_mode && directional_mode_skip_mask[mic->mbmi.mode])
+      continue;
+    if (is_directional_mode) {
+      rate_overhead = bmode_costs[mic->mbmi.mode] +
+                      write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1, 0);
+      this_rate_tokenonly = INT_MAX;
+      this_rd = rd_pick_intra_angle_sby(cpi, x, &this_rate,
+                                        &this_rate_tokenonly, &this_distortion,
+                                        &s, bsize, rate_overhead, best_rd);
+    } else {
+      mic->mbmi.angle_delta[0] = 0;
+      super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                      bsize, best_rd);
+    }
+#else
+    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+                    bsize, best_rd);
+#endif  // CONFIG_EXT_INTRA
+
+    if (this_rate_tokenonly == INT_MAX) continue;
+
+    this_rate = this_rate_tokenonly + bmode_costs[mic->mbmi.mode];
+
+    if (!xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+      // super_block_yrd above includes the cost of the tx_size in the
+      // tokenonly rate, but for intra blocks, tx_size is always coded
+      // (prediction granularity), so we account for it in the full rate,
+      // not the tokenonly rate.
+      this_rate_tokenonly -=
+          cpi->tx_size_cost[max_tx_size -
+                            TX_8X8][get_tx_size_context(xd)][mic->mbmi.tx_size];
+    }
+    if (cpi->common.allow_screen_content_tools && mic->mbmi.mode == DC_PRED)
+      this_rate += vp10_cost_bit(
+          vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+#if CONFIG_EXT_INTRA
+    if (mic->mbmi.mode == DC_PRED && ALLOW_FILTER_INTRA_MODES)
+      this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[0], 0);
+    if (is_directional_mode) {
+      int p_angle;
+      this_rate +=
+          write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
+                             MAX_ANGLE_DELTAS + mic->mbmi.angle_delta[0]);
+      p_angle = mode_to_angle_map[mic->mbmi.mode] +
+                mic->mbmi.angle_delta[0] * ANGLE_STEP;
+      if (vp10_is_intra_filter_switchable(p_angle))
+        this_rate +=
+            cpi->intra_filter_cost[intra_filter_ctx][mic->mbmi.intra_filter];
+    }
+#endif  // CONFIG_EXT_INTRA
+    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+#if CONFIG_EXT_INTRA
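+    // Toggle the bit off the skip mask for modes whose rd lands within
+    // best_rd / 16 of the best so far; the mask gates the filter-intra
+    // search below.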
+    if (best_rd == INT64_MAX || this_rd < (best_rd + (best_rd >> 4))) {
+      filter_intra_mode_skip_mask ^= (1 << mic->mbmi.mode);
+    }
+#endif  // CONFIG_EXT_INTRA
+
+    if (this_rd < best_rd) {
+      mode_selected = mic->mbmi.mode;
+      best_rd = this_rd;
+      best_tx = mic->mbmi.tx_size;
+#if CONFIG_EXT_INTRA
+      best_angle_delta = mic->mbmi.angle_delta[0];
+      best_filter = mic->mbmi.intra_filter;
+      beat_best_rd = 1;
+#endif  // CONFIG_EXT_INTRA
+      best_tx_type = mic->mbmi.tx_type;
+      *rate = this_rate;
+      *rate_tokenonly = this_rate_tokenonly;
+      *distortion = this_distortion;
+      *skippable = s;
+    }
+  }
+
+  if (cpi->common.allow_screen_content_tools)
+    rd_pick_palette_intra_sby(cpi, x, bsize, palette_ctx, bmode_costs[DC_PRED],
+                              &palette_mode_info, best_palette_color_map,
+                              &best_tx, &best_tx_type, &mode_selected,
+                              &best_rd);
+
+#if CONFIG_EXT_INTRA
+  if (ALLOW_FILTER_INTRA_MODES && beat_best_rd) {
+    if (rd_pick_ext_intra_sby(cpi, x, rate, rate_tokenonly, distortion,
+                              skippable, bsize, bmode_costs[DC_PRED], &best_rd,
+                              filter_intra_mode_skip_mask)) {
+      mode_selected = mic->mbmi.mode;
+      best_tx = mic->mbmi.tx_size;
+      ext_intra_mode_info = mic->mbmi.ext_intra_mode_info;
+      best_tx_type = mic->mbmi.tx_type;
+    }
+  }
+
+  mic->mbmi.ext_intra_mode_info.use_ext_intra_mode[0] =
+      ext_intra_mode_info.use_ext_intra_mode[0];
+  if (ext_intra_mode_info.use_ext_intra_mode[0]) {
+    mic->mbmi.ext_intra_mode_info.ext_intra_mode[0] =
+        ext_intra_mode_info.ext_intra_mode[0];
+    palette_mode_info.palette_size[0] = 0;
+  }
+#endif  // CONFIG_EXT_INTRA
+
+  mic->mbmi.mode = mode_selected;
+  mic->mbmi.tx_size = best_tx;
+#if CONFIG_EXT_INTRA
+  mic->mbmi.angle_delta[0] = best_angle_delta;
+  mic->mbmi.intra_filter = best_filter;
+#endif  // CONFIG_EXT_INTRA
+  mic->mbmi.tx_type = best_tx_type;
+  pmi->palette_size[0] = palette_mode_info.palette_size[0];
+  if (palette_mode_info.palette_size[0] > 0) {
+    memcpy(pmi->palette_colors, palette_mode_info.palette_colors,
+           PALETTE_MAX_SIZE * sizeof(palette_mode_info.palette_colors[0]));
+    memcpy(xd->plane[0].color_index_map, best_palette_color_map,
+           rows * cols * sizeof(best_palette_color_map[0]));
+  }
+
+  return best_rd;
+}
+
+#if CONFIG_VAR_TX
+void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+                        int blk_row, int blk_col, int plane, int block,
+                        int plane_bsize, int coeff_ctx, int *rate,
+                        int64_t *dist, int64_t *bsse, int *skip) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  const struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  int64_t tmp;
+  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
+  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
+  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
+  const scan_order *const scan_order =
+      get_scan(tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
+
+  BLOCK_SIZE txm_bsize = txsize_to_bsize[tx_size];
+  int bh = 4 * num_4x4_blocks_wide_lookup[txm_bsize];
+  int src_stride = p->src.stride;
+  uint8_t *src = &p->src.buf[4 * blk_row * src_stride + 4 * blk_col];
+  uint8_t *dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
+#if CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint16_t, rec_buffer16[MAX_TX_SQUARE]);
+  uint8_t *rec_buffer;
+#else
+  DECLARE_ALIGNED(16, uint8_t, rec_buffer[MAX_TX_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
+
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
+#if CONFIG_EXT_TX
+  assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+#if CONFIG_NEW_QUANT
+  vp10_xform_quant_fp_nuq(x, plane, block, blk_row, blk_col, plane_bsize,
+                          tx_size, coeff_ctx);
+#else
+  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
+                   VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+
+  vp10_optimize_b(x, plane, block, tx_size, coeff_ctx);
+
+// TODO(any): Use dist_block to compute distortion
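+// Copy the prediction into rec_buffer so the inverse transform can
+// reconstruct into scratch memory without touching dst.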
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    rec_buffer = CONVERT_TO_BYTEPTR(rec_buffer16);
+    vpx_highbd_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL,
+                             0, NULL, 0, bh, bh, xd->bd);
+  } else {
+    rec_buffer = (uint8_t *)rec_buffer16;
+    vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0,
+                      NULL, 0, bh, bh);
+  }
+#else
+  vpx_convolve_copy(dst, pd->dst.stride, rec_buffer, MAX_TX_SIZE, NULL, 0, NULL,
+                    0, bh, bh);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
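+  // For transform blocks straddling the frame edge, accumulate the source SSE
+  // over the visible 8x8 sub-blocks only.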
+  if (blk_row + (bh >> 2) > max_blocks_high ||
+      blk_col + (bh >> 2) > max_blocks_wide) {
+    int idx, idy;
+    int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
+    int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+    tmp = 0;
+    for (idy = 0; idy < blocks_height; idy += 2) {
+      for (idx = 0; idx < blocks_width; idx += 2) {
+        const int16_t *d = diff + 4 * idy * diff_stride + 4 * idx;
+        tmp += vpx_sum_squares_2d_i16(d, diff_stride, 8);
+      }
+    }
+  } else {
+    tmp = vpx_sum_squares_2d_i16(diff, diff_stride, bh);
+  }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+    tmp = ROUND_POWER_OF_TWO(tmp, (xd->bd - 8) * 2);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  *bsse += tmp * 16;
+
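+  // Reconstruct and measure distortion only when some coefficients survived
+  // quantization; otherwise the prediction is the reconstruction and tmp
+  // still holds the source SSE computed above.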
+  if (p->eobs[block] > 0) {
+    INV_TXFM_PARAM inv_txfm_param;
+    inv_txfm_param.tx_type = tx_type;
+    inv_txfm_param.tx_size = tx_size;
+    inv_txfm_param.eob = p->eobs[block];
+    inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      inv_txfm_param.bd = xd->bd;
+      highbd_inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
+    } else {
+      inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
+    }
+#else   // CONFIG_VP9_HIGHBITDEPTH
+    inv_txfm_add(dqcoeff, rec_buffer, MAX_TX_SIZE, &inv_txfm_param);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    if ((bh >> 2) + blk_col > max_blocks_wide ||
+        (bh >> 2) + blk_row > max_blocks_high) {
+      int idx, idy;
+      unsigned int this_dist;
+      int blocks_height = VPXMIN(bh >> 2, max_blocks_high - blk_row);
+      int blocks_width = VPXMIN(bh >> 2, max_blocks_wide - blk_col);
+      tmp = 0;
+      for (idy = 0; idy < blocks_height; idy += 2) {
+        for (idx = 0; idx < blocks_width; idx += 2) {
+          uint8_t *const s = src + 4 * idy * src_stride + 4 * idx;
+          uint8_t *const r = rec_buffer + 4 * idy * MAX_TX_SIZE + 4 * idx;
+          cpi->fn_ptr[BLOCK_8X8].vf(s, src_stride, r, MAX_TX_SIZE, &this_dist);
+          tmp += this_dist;
+        }
+      }
+    } else {
+      uint32_t this_dist;
+      cpi->fn_ptr[txm_bsize].vf(src, src_stride, rec_buffer, MAX_TX_SIZE,
+                                &this_dist);
+      tmp = this_dist;
+    }
+  }
+  *dist += tmp * 16;
+  *rate += cost_coeffs(x, plane, block, coeff_ctx, tx_size, scan_order->scan,
+                       scan_order->neighbors, 0);
+  *skip &= (p->eobs[block] == 0);
+}
+
+static void select_tx_block(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+                            int blk_col, int plane, int block, TX_SIZE tx_size,
+                            BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *ta,
+                            ENTROPY_CONTEXT *tl, TXFM_CONTEXT *tx_above,
+                            TXFM_CONTEXT *tx_left, int *rate, int64_t *dist,
+                            int64_t *bsse, int *skip, int64_t ref_best_rd,
+                            int *is_cost_valid) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
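+  // View &mbmi->inter_tx_size[tx_row][tx_col] as a 2-D array so that
+  // inter_tx_size[idy][idx] below indexes relative to this block's cell.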
+  TX_SIZE (*const inter_tx_size)[MAX_MIB_SIZE] =
+      (TX_SIZE(*)[MAX_MIB_SIZE])&mbmi->inter_tx_size[tx_row][tx_col];
+  const int bw = num_4x4_blocks_wide_lookup[plane_bsize];
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = bw;
+  int64_t this_rd = INT64_MAX;
+  ENTROPY_CONTEXT *pta = ta + blk_col;
+  ENTROPY_CONTEXT *ptl = tl + blk_row;
+  ENTROPY_CONTEXT stxa = 0, stxl = 0;
+  int coeff_ctx, i;
+  int ctx = txfm_partition_context(tx_above + (blk_col >> 1),
+                                   tx_left + (blk_row >> 1), tx_size);
+
+  int64_t sum_dist = 0, sum_bsse = 0;
+  int64_t sum_rd = INT64_MAX;
+  int sum_rate = vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 1);
+  int all_skip = 1;
+  int tmp_eob = 0;
+  int zero_blk_rate;
+
+#if CONFIG_EXT_TX
+  assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
+  if (ref_best_rd < 0) {
+    *is_cost_valid = 0;
+    return;
+  }
+
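+  // Collapse the per-4x4 entropy contexts covered by this transform size into
+  // one non-zero flag; the wide integer loads test 2, 4 or 8 contexts at once.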
+  switch (tx_size) {
+    case TX_4X4:
+      stxa = pta[0];
+      stxl = ptl[0];
+      break;
+    case TX_8X8:
+      stxa = !!*(const uint16_t *)&pta[0];
+      stxl = !!*(const uint16_t *)&ptl[0];
+      break;
+    case TX_16X16:
+      stxa = !!*(const uint32_t *)&pta[0];
+      stxl = !!*(const uint32_t *)&ptl[0];
+      break;
+    case TX_32X32:
+      stxa = !!*(const uint64_t *)&pta[0];
+      stxl = !!*(const uint64_t *)&ptl[0];
+      break;
+    default: assert(0 && "Invalid transform size."); break;
+  }
+  coeff_ctx = combine_entropy_contexts(stxa, stxl);
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  *rate = 0;
+  *dist = 0;
+  *bsse = 0;
+  *skip = 1;
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  zero_blk_rate =
+      x->token_costs[tx_size][pd->plane_type][1][0][0][coeff_ctx][EOB_TOKEN];
+
+  if (cpi->common.tx_mode == TX_MODE_SELECT || tx_size == TX_4X4) {
+    inter_tx_size[0][0] = tx_size;
+    vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+                       plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+
+    if ((RDCOST(x->rdmult, x->rddiv, *rate, *dist) >=
+             RDCOST(x->rdmult, x->rddiv, zero_blk_rate, *bsse) ||
+         *skip == 1) &&
+        !xd->lossless[mbmi->segment_id]) {
+      *rate = zero_blk_rate;
+      *dist = *bsse;
+      *skip = 1;
+      x->blk_skip[plane][blk_row * bw + blk_col] = 1;
+      p->eobs[block] = 0;
+    } else {
+      x->blk_skip[plane][blk_row * bw + blk_col] = 0;
+      *skip = 0;
+    }
+
+    if (tx_size > TX_4X4)
+      *rate += vp10_cost_bit(cpi->common.fc->txfm_partition_prob[ctx], 0);
+    this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *dist);
+    tmp_eob = p->eobs[block];
+  }
+
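+  // Recurse into the four quadrants at the next smaller transform size,
+  // bailing out as soon as the accumulated rd exceeds the whole-block rd
+  // computed above.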
+  if (tx_size > TX_4X4) {
+    BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+    int bsl = b_height_log2_lookup[bsize];
+    int sub_step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+    int i;
+    int this_rate;
+    int64_t this_dist;
+    int64_t this_bsse;
+    int this_skip;
+    int this_cost_valid = 1;
+    int64_t tmp_rd = 0;
+
+#if CONFIG_EXT_TX
+    assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+    --bsl;
+    for (i = 0; i < 4 && this_cost_valid; ++i) {
+      int offsetr = (i >> 1) << bsl;
+      int offsetc = (i & 0x01) << bsl;
+      select_tx_block(cpi, x, blk_row + offsetr, blk_col + offsetc, plane,
+                      block + i * sub_step, tx_size - 1, plane_bsize, ta, tl,
+                      tx_above, tx_left, &this_rate, &this_dist, &this_bsse,
+                      &this_skip, ref_best_rd - tmp_rd, &this_cost_valid);
+      sum_rate += this_rate;
+      sum_dist += this_dist;
+      sum_bsse += this_bsse;
+      all_skip &= this_skip;
+      tmp_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
+      if (this_rd < tmp_rd) break;
+    }
+    if (this_cost_valid) sum_rd = tmp_rd;
+  }
+
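+  // Keep whichever alternative is cheaper: this block coded whole at tx_size,
+  // or split into the four sub-blocks.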
+  if (this_rd < sum_rd) {
+    int idx, idy;
+    for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i)
+      pta[i] = !(tmp_eob == 0);
+    for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i)
+      ptl[i] = !(tmp_eob == 0);
+    txfm_partition_update(tx_above + (blk_col >> 1), tx_left + (blk_row >> 1),
+                          tx_size);
+    inter_tx_size[0][0] = tx_size;
+    for (idy = 0; idy < num_4x4_blocks_high_txsize_lookup[tx_size] / 2; ++idy)
+      for (idx = 0; idx < num_4x4_blocks_wide_txsize_lookup[tx_size] / 2; ++idx)
+        inter_tx_size[idy][idx] = tx_size;
+    mbmi->tx_size = tx_size;
+    if (this_rd == INT64_MAX) *is_cost_valid = 0;
+    x->blk_skip[plane][blk_row * bw + blk_col] = *skip;
+  } else {
+    *rate = sum_rate;
+    *dist = sum_dist;
+    *bsse = sum_bsse;
+    *skip = all_skip;
+    if (sum_rd == INT64_MAX) *is_cost_valid = 0;
+  }
+}
+
+static void inter_block_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                            int64_t *distortion, int *skippable, int64_t *sse,
+                            BLOCK_SIZE bsize, int64_t ref_best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  int is_cost_valid = 1;
+  int64_t this_rd = 0;
+
+  if (ref_best_rd < 0) is_cost_valid = 0;
+
+  *rate = 0;
+  *distortion = 0;
+  *sse = 0;
+  *skippable = 1;
+
+  if (is_cost_valid) {
+    const struct macroblockd_plane *const pd = &xd->plane[0];
+    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+    BLOCK_SIZE txb_size = txsize_to_bsize[max_txsize_lookup[plane_bsize]];
+    int bh = num_4x4_blocks_wide_lookup[txb_size];
+    int idx, idy;
+    int block = 0;
+    int step = 1 << (max_txsize_lookup[plane_bsize] * 2);
+    ENTROPY_CONTEXT ctxa[2 * MAX_MIB_SIZE];
+    ENTROPY_CONTEXT ctxl[2 * MAX_MIB_SIZE];
+    TXFM_CONTEXT tx_above[MAX_MIB_SIZE];
+    TXFM_CONTEXT tx_left[MAX_MIB_SIZE];
+
+    int pnrate = 0, pnskip = 1;
+    int64_t pndist = 0, pnsse = 0;
+
+    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ctxa, ctxl);
+    memcpy(tx_above, xd->above_txfm_context,
+           sizeof(TXFM_CONTEXT) * (mi_width >> 1));
+    memcpy(tx_left, xd->left_txfm_context,
+           sizeof(TXFM_CONTEXT) * (mi_height >> 1));
+
+    for (idy = 0; idy < mi_height; idy += bh) {
+      for (idx = 0; idx < mi_width; idx += bh) {
+        select_tx_block(cpi, x, idy, idx, 0, block,
+                        max_txsize_lookup[plane_bsize], plane_bsize, ctxa, ctxl,
+                        tx_above, tx_left, &pnrate, &pndist, &pnsse, &pnskip,
+                        ref_best_rd - this_rd, &is_cost_valid);
+        *rate += pnrate;
+        *distortion += pndist;
+        *sse += pnsse;
+        *skippable &= pnskip;
+        this_rd += VPXMIN(RDCOST(x->rdmult, x->rddiv, pnrate, pndist),
+                          RDCOST(x->rdmult, x->rddiv, 0, pnsse));
+        block += step;
+      }
+    }
+  }
+
+  this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+                   RDCOST(x->rdmult, x->rddiv, 0, *sse));
+  if (this_rd > ref_best_rd) is_cost_valid = 0;
+
+  if (!is_cost_valid) {
+    // Invalidate the cost values to signal early termination.
+    *rate = INT_MAX;
+    *distortion = INT64_MAX;
+    *sse = INT64_MAX;
+    *skippable = 0;
+  }
+}
+
+static int64_t select_tx_size_fix_type(const VP10_COMP *cpi, MACROBLOCK *x,
+                                       int *rate, int64_t *dist, int *skippable,
+                                       int64_t *sse, BLOCK_SIZE bsize,
+                                       int64_t ref_best_rd, TX_TYPE tx_type) {
+  const VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+  const int is_inter = is_inter_block(mbmi);
+#if CONFIG_EXT_TX
+  int ext_tx_set = get_ext_tx_set(max_tx_size, bsize, is_inter);
+#endif  // CONFIG_EXT_TX
+  vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
+  int s0 = vp10_cost_bit(skip_prob, 0);
+  int s1 = vp10_cost_bit(skip_prob, 1);
+  int64_t rd;
+
+  mbmi->tx_type = tx_type;
+  inter_block_yrd(cpi, x, rate, dist, skippable, sse, bsize, ref_best_rd);
+
+  if (*rate == INT_MAX) return INT64_MAX;
+
+#if CONFIG_EXT_TX
+  if (get_ext_tx_types(max_tx_size, bsize, is_inter) > 1 &&
+      !xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+    if (is_inter) {
+      if (ext_tx_set > 0)
+        *rate +=
+            cpi->inter_tx_type_costs[ext_tx_set][max_tx_size][mbmi->tx_type];
+    } else {
+      if (ext_tx_set > 0 && ALLOW_INTRA_EXT_TX)
+        *rate += cpi->intra_tx_type_costs[ext_tx_set][max_tx_size][mbmi->mode]
+                                         [mbmi->tx_type];
+    }
+  }
+#else   // CONFIG_EXT_TX
+  if (max_tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id]) {
+    if (is_inter)
+      *rate += cpi->inter_tx_type_costs[max_tx_size][mbmi->tx_type];
+    else
+      *rate += cpi->intra_tx_type_costs
+                   [max_tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
+                   [mbmi->tx_type];
+  }
+#endif  // CONFIG_EXT_TX
+
+  if (*skippable)
+    rd = RDCOST(x->rdmult, x->rddiv, s1, *sse);
+  else
+    rd = RDCOST(x->rdmult, x->rddiv, *rate + s0, *dist);
+
+  if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !(*skippable))
+    rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, *sse));
+
+  return rd;
+}
+
+static void select_tx_type_yrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                               int64_t *distortion, int *skippable,
+                               int64_t *sse, BLOCK_SIZE bsize,
+                               int64_t ref_best_rd) {
+  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int64_t rd = INT64_MAX;
+  int64_t best_rd = INT64_MAX;
+  TX_TYPE tx_type, best_tx_type = DCT_DCT;
+  const int is_inter = is_inter_block(mbmi);
+  TX_SIZE best_tx_size[MAX_MIB_SIZE][MAX_MIB_SIZE];
+  TX_SIZE best_tx = TX_SIZES;
+  uint8_t best_blk_skip[MAX_MIB_SIZE * MAX_MIB_SIZE * 4];
+  const int n4 = 1 << (num_pels_log2_lookup[bsize] - 4);
+  int idx, idy;
+  int prune = 0;
+#if CONFIG_EXT_TX
+  int ext_tx_set = get_ext_tx_set(max_tx_size, bsize, is_inter);
+#endif  // CONFIG_EXT_TX
+
+  if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE)
+#if CONFIG_EXT_TX
+    prune = prune_tx_types(cpi, bsize, x, xd, ext_tx_set);
+#else
+    prune = prune_tx_types(cpi, bsize, x, xd, 0);
+#endif
+
+  *distortion = INT64_MAX;
+  *rate = INT_MAX;
+  *skippable = 0;
+  *sse = INT64_MAX;
+
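+  // Evaluate each permitted transform type; the recursive transform-size
+  // selection runs once per type inside select_tx_size_fix_type().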
+  for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) {
+    int this_rate = 0;
+    int this_skip = 1;
+    int64_t this_dist = 0;
+    int64_t this_sse = 0;
+#if CONFIG_EXT_TX
+    if (is_inter) {
+      if (!ext_tx_used_inter[ext_tx_set][tx_type]) continue;
+      if (cpi->sf.tx_type_search.prune_mode > NO_PRUNE) {
+        if (!do_tx_type_search(tx_type, prune)) continue;
+      }
+    } else {
+      if (!ALLOW_INTRA_EXT_TX && bsize >= BLOCK_8X8) {
+        if (tx_type != intra_mode_to_tx_type_context[mbmi->mode]) continue;
+      }
+      if (!ext_tx_used_intra[ext_tx_set][tx_type]) continue;
+    }
+#else   // CONFIG_EXT_TX
+    if (max_tx_size >= TX_32X32 && tx_type != DCT_DCT) continue;
+    if (is_inter && cpi->sf.tx_type_search.prune_mode > NO_PRUNE &&
+        !do_tx_type_search(tx_type, prune))
+      continue;
+#endif  // CONFIG_EXT_TX
+    if (is_inter && x->use_default_inter_tx_type &&
+        tx_type != get_default_tx_type(0, xd, 0, max_tx_size))
+      continue;
+
+    rd = select_tx_size_fix_type(cpi, x, &this_rate, &this_dist, &this_skip,
+                                 &this_sse, bsize, ref_best_rd, tx_type);
+
+    if (rd < best_rd) {
+      best_rd = rd;
+      *distortion = this_dist;
+      *rate = this_rate;
+      *skippable = this_skip;
+      *sse = this_sse;
+      best_tx_type = mbmi->tx_type;
+      best_tx = mbmi->tx_size;
+      memcpy(best_blk_skip, x->blk_skip[0], sizeof(best_blk_skip[0]) * n4);
+      for (idy = 0; idy < xd->n8_h; ++idy)
+        for (idx = 0; idx < xd->n8_w; ++idx)
+          best_tx_size[idy][idx] = mbmi->inter_tx_size[idy][idx];
+    }
+  }
+
+  mbmi->tx_type = best_tx_type;
+  for (idy = 0; idy < xd->n8_h; ++idy)
+    for (idx = 0; idx < xd->n8_w; ++idx)
+      mbmi->inter_tx_size[idy][idx] = best_tx_size[idy][idx];
+  mbmi->tx_size = best_tx;
+  memcpy(x->blk_skip[0], best_blk_skip, sizeof(best_blk_skip[0]) * n4);
+}
+
+static void tx_block_rd(const VP10_COMP *cpi, MACROBLOCK *x, int blk_row,
+                        int blk_col, int plane, int block, TX_SIZE tx_size,
+                        BLOCK_SIZE plane_bsize, ENTROPY_CONTEXT *above_ctx,
+                        ENTROPY_CONTEXT *left_ctx, int *rate, int64_t *dist,
+                        int64_t *bsse, int *skip) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  struct macroblock_plane *const p = &x->plane[plane];
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
+  TX_SIZE plane_tx_size;
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
+#if CONFIG_EXT_TX
+  assert(tx_size < TX_SIZES);
+#endif  // CONFIG_EXT_TX
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  plane_tx_size = plane ? get_uv_tx_size_impl(
+                              mbmi->inter_tx_size[tx_row][tx_col], bsize, 0, 0)
+                        : mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (tx_size == plane_tx_size) {
+    int coeff_ctx, i;
+    ENTROPY_CONTEXT *ta = above_ctx + blk_col;
+    ENTROPY_CONTEXT *tl = left_ctx + blk_row;
+    switch (tx_size) {
+      case TX_4X4: break;
+      case TX_8X8:
+        ta[0] = !!*(const uint16_t *)&ta[0];
+        tl[0] = !!*(const uint16_t *)&tl[0];
+        break;
+      case TX_16X16:
+        ta[0] = !!*(const uint32_t *)&ta[0];
+        tl[0] = !!*(const uint32_t *)&tl[0];
+        break;
+      case TX_32X32:
+        ta[0] = !!*(const uint64_t *)&ta[0];
+        tl[0] = !!*(const uint64_t *)&tl[0];
+        break;
+      default: assert(0 && "Invalid transform size."); break;
+    }
+    coeff_ctx = combine_entropy_contexts(ta[0], tl[0]);
+    vp10_tx_block_rd_b(cpi, x, tx_size, blk_row, blk_col, plane, block,
+                       plane_bsize, coeff_ctx, rate, dist, bsse, skip);
+    for (i = 0; i < num_4x4_blocks_wide_txsize_lookup[tx_size]; ++i)
+      ta[i] = !(p->eobs[block] == 0);
+    for (i = 0; i < num_4x4_blocks_high_txsize_lookup[tx_size]; ++i)
+      tl[i] = !(p->eobs[block] == 0);
+  } else {
+    int bsl = b_width_log2_lookup[bsize];
+    int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+    int i;
+
+    assert(bsl > 0);
+    --bsl;
+
+    for (i = 0; i < 4; ++i) {
+      int offsetr = (i >> 1) << bsl;
+      int offsetc = (i & 0x01) << bsl;
+      tx_block_rd(cpi, x, blk_row + offsetr, blk_col + offsetc, plane,
+                  block + i * step, tx_size - 1, plane_bsize, above_ctx,
+                  left_ctx, rate, dist, bsse, skip);
+    }
+  }
+}
+
+// Return value 0: early termination triggered, no valid rd cost available;
+//              1: rd cost values are valid.
+static int inter_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                            int64_t *distortion, int *skippable, int64_t *sse,
+                            BLOCK_SIZE bsize, int64_t ref_best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  int plane;
+  int is_cost_valid = 1;
+  int64_t this_rd;
+
+  if (ref_best_rd < 0) is_cost_valid = 0;
+
+  if (is_inter_block(mbmi) && is_cost_valid) {
+    for (plane = 1; plane < MAX_MB_PLANE; ++plane)
+      vp10_subtract_plane(x, bsize, plane);
+  }
+
+  *rate = 0;
+  *distortion = 0;
+  *sse = 0;
+  *skippable = 1;
+
+  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+    BLOCK_SIZE txb_size = txsize_to_bsize[max_txsize_lookup[plane_bsize]];
+    int bh = num_4x4_blocks_wide_lookup[txb_size];
+    int idx, idy;
+    int block = 0;
+    int step = 1 << (max_txsize_lookup[plane_bsize] * 2);
+    int pnrate = 0, pnskip = 1;
+    int64_t pndist = 0, pnsse = 0;
+    ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
+    ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
+
+    vp10_get_entropy_contexts(bsize, TX_4X4, pd, ta, tl);
+
+    for (idy = 0; idy < mi_height; idy += bh) {
+      for (idx = 0; idx < mi_width; idx += bh) {
+        tx_block_rd(cpi, x, idy, idx, plane, block,
+                    max_txsize_lookup[plane_bsize], plane_bsize, ta, tl,
+                    &pnrate, &pndist, &pnsse, &pnskip);
+        block += step;
+      }
+    }
+
+    if (pnrate == INT_MAX) {
+      is_cost_valid = 0;
+      break;
+    }
+
+    *rate += pnrate;
+    *distortion += pndist;
+    *sse += pnsse;
+    *skippable &= pnskip;
+
+    this_rd = VPXMIN(RDCOST(x->rdmult, x->rddiv, *rate, *distortion),
+                     RDCOST(x->rdmult, x->rddiv, 0, *sse));
+
+    if (this_rd > ref_best_rd) {
+      is_cost_valid = 0;
+      break;
+    }
+  }
+
+  if (!is_cost_valid) {
+    // Invalidate the cost values to signal early termination.
+    *rate = INT_MAX;
+    *distortion = INT64_MAX;
+    *sse = INT64_MAX;
+    *skippable = 0;
+  }
+
+  return is_cost_valid;
+}
+#endif  // CONFIG_VAR_TX
+
+// Return value 0: early termination triggered, no valid rd cost available;
+//              1: rd cost values are valid.
+static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                            int64_t *distortion, int *skippable, int64_t *sse,
+                            BLOCK_SIZE bsize, int64_t ref_best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
+  int plane;
+  int pnrate = 0, pnskip = 1;
+  int64_t pndist = 0, pnsse = 0;
+  int is_cost_valid = 1;
+
+  if (ref_best_rd < 0) is_cost_valid = 0;
+
+  if (is_inter_block(mbmi) && is_cost_valid) {
+    for (plane = 1; plane < MAX_MB_PLANE; ++plane)
+      vp10_subtract_plane(x, bsize, plane);
+  }
+
+  *rate = 0;
+  *distortion = 0;
+  *sse = 0;
+  *skippable = 1;
+
+  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
+    txfm_rd_in_plane(x, cpi, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd,
+                     plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
+    if (pnrate == INT_MAX) {
+      is_cost_valid = 0;
+      break;
+    }
+    *rate += pnrate;
+    *distortion += pndist;
+    *sse += pnsse;
+    *skippable &= pnskip;
+    if (RDCOST(x->rdmult, x->rddiv, *rate, *distortion) > ref_best_rd &&
+        RDCOST(x->rdmult, x->rddiv, 0, *sse) > ref_best_rd) {
+      is_cost_valid = 0;
+      break;
+    }
+  }
+
+  if (!is_cost_valid) {
+    // Invalidate the cost values to signal early termination.
+    *rate = INT_MAX;
+    *distortion = INT64_MAX;
+    *sse = INT64_MAX;
+    *skippable = 0;
+  }
+
+  return is_cost_valid;
+}
+
+static void rd_pick_palette_intra_sbuv(
+    VP10_COMP *cpi, MACROBLOCK *x, int dc_mode_cost,
+    PALETTE_MODE_INFO *palette_mode_info, uint8_t *best_palette_color_map,
+    PREDICTION_MODE *mode_selected, int64_t *best_rd, int *rate,
+    int *rate_tokenonly, int64_t *distortion, int *skippable) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const int rows =
+      (4 * num_4x4_blocks_high_lookup[bsize]) >> (xd->plane[1].subsampling_y);
+  const int cols =
+      (4 * num_4x4_blocks_wide_lookup[bsize]) >> (xd->plane[1].subsampling_x);
+  int this_rate, this_rate_tokenonly, s;
+  int64_t this_distortion, this_rd;
+  int colors_u, colors_v, colors;
+  const int src_stride = x->plane[1].src.stride;
+  const uint8_t *const src_u = x->plane[1].src.buf;
+  const uint8_t *const src_v = x->plane[2].src.buf;
+
+  if (rows * cols > PALETTE_MAX_BLOCK_SIZE) return;
+
+#if CONFIG_EXT_INTRA
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (cpi->common.use_highbitdepth) {
+    colors_u = vp10_count_colors_highbd(src_u, src_stride, rows, cols,
+                                        cpi->common.bit_depth);
+    colors_v = vp10_count_colors_highbd(src_v, src_stride, rows, cols,
+                                        cpi->common.bit_depth);
+  } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    colors_u = vp10_count_colors(src_u, src_stride, rows, cols);
+    colors_v = vp10_count_colors(src_v, src_stride, rows, cols);
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  colors = colors_u > colors_v ? colors_u : colors_v;
+  if (colors > 1 && colors <= 64) {
+    int r, c, n, i, j;
+    const int max_itr = 50;
+    int color_ctx, color_idx = 0;
+    int color_order[PALETTE_MAX_SIZE];
+    int64_t this_sse;
+    float lb_u, ub_u, val_u;
+    float lb_v, ub_v, val_v;
+    float *const data = x->palette_buffer->kmeans_data_buf;
+    float centroids[2 * PALETTE_MAX_SIZE];
+    uint8_t *const color_map = xd->plane[1].color_index_map;
+    PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+    uint16_t *src_u16 = CONVERT_TO_SHORTPTR(src_u);
+    uint16_t *src_v16 = CONVERT_TO_SHORTPTR(src_v);
+    if (cpi->common.use_highbitdepth) {
+      lb_u = src_u16[0];
+      ub_u = src_u16[0];
+      lb_v = src_v16[0];
+      ub_v = src_v16[0];
+    } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      lb_u = src_u[0];
+      ub_u = src_u[0];
+      lb_v = src_v[0];
+      ub_v = src_v[0];
+#if CONFIG_VP9_HIGHBITDEPTH
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    mbmi->uv_mode = DC_PRED;
+#if CONFIG_EXT_INTRA
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+    for (r = 0; r < rows; ++r) {
+      for (c = 0; c < cols; ++c) {
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (cpi->common.use_highbitdepth) {
+          val_u = src_u16[r * src_stride + c];
+          val_v = src_v16[r * src_stride + c];
+          data[(r * cols + c) * 2] = val_u;
+          data[(r * cols + c) * 2 + 1] = val_v;
+        } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+          val_u = src_u[r * src_stride + c];
+          val_v = src_v[r * src_stride + c];
+          data[(r * cols + c) * 2] = val_u;
+          data[(r * cols + c) * 2 + 1] = val_v;
+#if CONFIG_VP9_HIGHBITDEPTH
+        }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        if (val_u < lb_u)
+          lb_u = val_u;
+        else if (val_u > ub_u)
+          ub_u = val_u;
+        if (val_v < lb_v)
+          lb_v = val_v;
+        else if (val_v > ub_v)
+          ub_v = val_v;
+      }
+    }
+
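+    // Try each palette size from min(colors, PALETTE_MAX_SIZE) down to 2,
+    // seeding the k-means centroids uniformly between the observed bounds.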
+    for (n = colors > PALETTE_MAX_SIZE ? PALETTE_MAX_SIZE : colors; n >= 2;
+         --n) {
+      for (i = 0; i < n; ++i) {
+        centroids[i * 2] = lb_u + (2 * i + 1) * (ub_u - lb_u) / n / 2;
+        centroids[i * 2 + 1] = lb_v + (2 * i + 1) * (ub_v - lb_v) / n / 2;
+      }
+      vp10_k_means(data, centroids, color_map, rows * cols, n, 2, max_itr);
+      pmi->palette_size[1] = n;
+      for (i = 1; i < 3; ++i) {
+        for (j = 0; j < n; ++j) {
+#if CONFIG_VP9_HIGHBITDEPTH
+          if (cpi->common.use_highbitdepth)
+            pmi->palette_colors[i * PALETTE_MAX_SIZE + j] = clip_pixel_highbd(
+                (int)centroids[j * 2 + i - 1], cpi->common.bit_depth);
+          else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+            pmi->palette_colors[i * PALETTE_MAX_SIZE + j] =
+                clip_pixel((int)centroids[j * 2 + i - 1]);
+        }
+      }
+
+      super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                       &this_sse, bsize, *best_rd);
+      if (this_rate_tokenonly == INT_MAX) continue;
+      this_rate =
+          this_rate_tokenonly + dc_mode_cost +
+          2 * cpi->common.bit_depth * n * vp10_cost_bit(128, 0) +
+          cpi->palette_uv_size_cost[bsize - BLOCK_8X8][n - 2] +
+          write_uniform_cost(n, color_map[0]) +
+          vp10_cost_bit(
+              vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 1);
+
+      for (i = 0; i < rows; ++i) {
+        for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
+          color_ctx = vp10_get_palette_color_context(color_map, cols, i, j, n,
+                                                     color_order);
+          for (r = 0; r < n; ++r)
+            if (color_map[i * cols + j] == color_order[r]) {
+              color_idx = r;
+              break;
+            }
+          assert(color_idx >= 0 && color_idx < n);
+          this_rate += cpi->palette_uv_color_cost[n - 2][color_ctx][color_idx];
+        }
+      }
+
+      this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+      if (this_rd < *best_rd) {
+        *best_rd = this_rd;
+        *palette_mode_info = *pmi;
+        memcpy(best_palette_color_map, color_map,
+               rows * cols * sizeof(best_palette_color_map[0]));
+        *mode_selected = DC_PRED;
+        *rate = this_rate;
+        *distortion = this_distortion;
+        *rate_tokenonly = this_rate_tokenonly;
+        *skippable = s;
+      }
+    }
+  }
+}
+
+#if CONFIG_EXT_INTRA
+// Return 1 if an ext intra mode is selected; return 0 otherwise.
+static int rd_pick_ext_intra_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                  int *rate_tokenonly, int64_t *distortion,
+                                  int *skippable, BLOCK_SIZE bsize,
+                                  int64_t *best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  int ext_intra_selected_flag = 0;
+  int this_rate_tokenonly, this_rate, s;
+  int64_t this_distortion, this_sse, this_rd;
+  EXT_INTRA_MODE mode;
+  EXT_INTRA_MODE_INFO ext_intra_mode_info;
+
+  vp10_zero(ext_intra_mode_info);
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 1;
+  mbmi->uv_mode = DC_PRED;
+  mbmi->palette_mode_info.palette_size[1] = 0;
+
+  for (mode = 0; mode < FILTER_INTRA_MODES; ++mode) {
+    mbmi->ext_intra_mode_info.ext_intra_mode[1] = mode;
+    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                          &this_sse, bsize, *best_rd))
+      continue;
+
+    this_rate = this_rate_tokenonly +
+                vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 1) +
+                cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode] +
+                write_uniform_cost(FILTER_INTRA_MODES, mode);
+    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+    if (this_rd < *best_rd) {
+      *best_rd = this_rd;
+      *rate = this_rate;
+      *rate_tokenonly = this_rate_tokenonly;
+      *distortion = this_distortion;
+      *skippable = s;
+      ext_intra_mode_info = mbmi->ext_intra_mode_info;
+      ext_intra_selected_flag = 1;
+    }
+  }
+
+  if (ext_intra_selected_flag) {
+    mbmi->uv_mode = DC_PRED;
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
+        ext_intra_mode_info.use_ext_intra_mode[1];
+    mbmi->ext_intra_mode_info.ext_intra_mode[1] =
+        ext_intra_mode_info.ext_intra_mode[1];
+    return 1;
+  } else {
+    return 0;
+  }
+}
+
+static void pick_intra_angle_routine_sbuv(VP10_COMP *cpi, MACROBLOCK *x,
+                                          int *rate, int *rate_tokenonly,
+                                          int64_t *distortion, int *skippable,
+                                          int *best_angle_delta,
+                                          BLOCK_SIZE bsize, int rate_overhead,
+                                          int64_t *best_rd) {
+  MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
+  int this_rate_tokenonly, this_rate, s;
+  int64_t this_distortion, this_sse, this_rd;
+
+  if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                        &this_sse, bsize, *best_rd))
+    return;
+
+  this_rate = this_rate_tokenonly + rate_overhead;
+  this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+  if (this_rd < *best_rd) {
+    *best_rd = this_rd;
+    *best_angle_delta = mbmi->angle_delta[1];
+    *rate = this_rate;
+    *rate_tokenonly = this_rate_tokenonly;
+    *distortion = this_distortion;
+    *skippable = s;
+  }
+}
+
+static int rd_pick_intra_angle_sbuv(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                    int *rate_tokenonly, int64_t *distortion,
+                                    int *skippable, BLOCK_SIZE bsize,
+                                    int rate_overhead, int64_t best_rd) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  int this_rate_tokenonly, this_rate, s;
+  int64_t this_distortion, this_sse, this_rd;
+  int angle_delta, best_angle_delta = 0;
+  const double rd_adjust = 1.2;
+
+  *rate_tokenonly = INT_MAX;
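+  // Same two-stage delta search as the luma version: coarse deltas
+  // {0, -2, +2} first, then refinement around the winner.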
+  if (ANGLE_FAST_SEARCH) {
+    int deltas_level1[3] = { 0, -2, 2 };
+    int deltas_level2[3][2] = {
+      { -1, 1 }, { -3, -1 }, { 1, 3 },
+    };
+    const int level1 = 3, level2 = 2;
+    int i, j, best_i = -1;
+
+    for (i = 0; i < level1; ++i) {
+      int64_t tmp_best_rd;
+      mbmi->angle_delta[1] = deltas_level1[i];
+      tmp_best_rd = (i == 0 && best_rd < INT64_MAX)
+                        ? (int64_t)(best_rd * rd_adjust)
+                        : best_rd;
+      if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                            &this_sse, bsize, tmp_best_rd)) {
+        if (i == 0)
+          break;
+        else
+          continue;
+      }
+      this_rate = this_rate_tokenonly + rate_overhead;
+      this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+      if (i == 0 && best_rd < INT64_MAX && this_rd > best_rd * rd_adjust) break;
+      if (this_rd < best_rd) {
+        best_i = i;
+        best_rd = this_rd;
+        best_angle_delta = mbmi->angle_delta[1];
+        *rate = this_rate;
+        *rate_tokenonly = this_rate_tokenonly;
+        *distortion = this_distortion;
+        *skippable = s;
+      }
+    }
+
+    if (best_i >= 0) {
+      for (j = 0; j < level2; ++j) {
+        mbmi->angle_delta[1] = deltas_level2[best_i][j];
+        pick_intra_angle_routine_sbuv(cpi, x, rate, rate_tokenonly, distortion,
+                                      skippable, &best_angle_delta, bsize,
+                                      rate_overhead, &best_rd);
+      }
+    }
+  } else {
+    for (angle_delta = -MAX_ANGLE_DELTAS; angle_delta <= MAX_ANGLE_DELTAS;
+         ++angle_delta) {
+      mbmi->angle_delta[1] = angle_delta;
+      pick_intra_angle_routine_sbuv(cpi, x, rate, rate_tokenonly, distortion,
+                                    skippable, &best_angle_delta, bsize,
+                                    rate_overhead, &best_rd);
+    }
+  }
+
+  mbmi->angle_delta[1] = best_angle_delta;
+  return *rate_tokenonly != INT_MAX;
+}
+#endif  // CONFIG_EXT_INTRA
+
+static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                                       int *rate_tokenonly, int64_t *distortion,
+                                       int *skippable, BLOCK_SIZE bsize,
+                                       TX_SIZE max_tx_size) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  PREDICTION_MODE mode;
+  PREDICTION_MODE mode_selected = DC_PRED;
+  int64_t best_rd = INT64_MAX, this_rd;
+  int this_rate_tokenonly, this_rate, s;
+  int64_t this_distortion, this_sse;
+  const int rows =
+      (4 * num_4x4_blocks_high_lookup[bsize]) >> (xd->plane[1].subsampling_y);
+  const int cols =
+      (4 * num_4x4_blocks_wide_lookup[bsize]) >> (xd->plane[1].subsampling_x);
+  PALETTE_MODE_INFO palette_mode_info;
+  PALETTE_MODE_INFO *const pmi = &xd->mi[0]->mbmi.palette_mode_info;
+  uint8_t *best_palette_color_map = NULL;
+#if CONFIG_EXT_INTRA
+  int is_directional_mode, rate_overhead, best_angle_delta = 0;
+  EXT_INTRA_MODE_INFO ext_intra_mode_info;
+
+  ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+  palette_mode_info.palette_size[1] = 0;
+  pmi->palette_size[1] = 0;
+  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
+    if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
+
+    mbmi->uv_mode = mode;
+#if CONFIG_EXT_INTRA
+    is_directional_mode = (mode != DC_PRED && mode != TM_PRED);
+    rate_overhead = cpi->intra_uv_mode_cost[mbmi->mode][mode] +
+                    write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1, 0);
+    mbmi->angle_delta[1] = 0;
+    if (mbmi->sb_type >= BLOCK_8X8 && is_directional_mode) {
+      if (!rd_pick_intra_angle_sbuv(cpi, x, &this_rate, &this_rate_tokenonly,
+                                    &this_distortion, &s, bsize, rate_overhead,
+                                    best_rd))
+        continue;
+    } else {
+      if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                            &this_sse, bsize, best_rd))
+        continue;
+    }
+    this_rate = this_rate_tokenonly + cpi->intra_uv_mode_cost[mbmi->mode][mode];
+    if (mbmi->sb_type >= BLOCK_8X8 && is_directional_mode)
+      this_rate += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
+                                      MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
+    if (mbmi->sb_type >= BLOCK_8X8 && mode == DC_PRED &&
+        ALLOW_FILTER_INTRA_MODES)
+      this_rate += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1], 0);
+#else
+    if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+                          &this_sse, bsize, best_rd))
+      continue;
+    this_rate = this_rate_tokenonly + cpi->intra_uv_mode_cost[mbmi->mode][mode];
+#endif  // CONFIG_EXT_INTRA
+    if (cpi->common.allow_screen_content_tools && mbmi->sb_type >= BLOCK_8X8 &&
+        mode == DC_PRED)
+      this_rate += vp10_cost_bit(
+          vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0], 0);
+
+    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
+
+    if (this_rd < best_rd) {
+      mode_selected = mode;
+#if CONFIG_EXT_INTRA
+      best_angle_delta = mbmi->angle_delta[1];
+#endif  // CONFIG_EXT_INTRA
+      best_rd = this_rd;
+      *rate = this_rate;
+      *rate_tokenonly = this_rate_tokenonly;
+      *distortion = this_distortion;
+      *skippable = s;
+    }
+  }
+
+  if (cpi->common.allow_screen_content_tools && mbmi->sb_type >= BLOCK_8X8) {
+    best_palette_color_map = x->palette_buffer->best_palette_color_map;
+    rd_pick_palette_intra_sbuv(
+        cpi, x, cpi->intra_uv_mode_cost[mbmi->mode][DC_PRED],
+        &palette_mode_info, best_palette_color_map, &mode_selected, &best_rd,
+        rate, rate_tokenonly, distortion, skippable);
+  }
+
+#if CONFIG_EXT_INTRA
+  if (mbmi->sb_type >= BLOCK_8X8 && ALLOW_FILTER_INTRA_MODES) {
+    if (rd_pick_ext_intra_sbuv(cpi, x, rate, rate_tokenonly, distortion,
+                               skippable, bsize, &best_rd)) {
+      mode_selected = mbmi->uv_mode;
+      ext_intra_mode_info = mbmi->ext_intra_mode_info;
+    }
+  }
+
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
+      ext_intra_mode_info.use_ext_intra_mode[1];
+  if (ext_intra_mode_info.use_ext_intra_mode[1]) {
+    mbmi->ext_intra_mode_info.ext_intra_mode[1] =
+        ext_intra_mode_info.ext_intra_mode[1];
+    palette_mode_info.palette_size[1] = 0;
+  }
+  mbmi->angle_delta[1] = best_angle_delta;
+#endif  // CONFIG_EXT_INTRA
+  mbmi->uv_mode = mode_selected;
+  pmi->palette_size[1] = palette_mode_info.palette_size[1];
+  if (palette_mode_info.palette_size[1] > 0) {
+    memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
+           palette_mode_info.palette_colors + PALETTE_MAX_SIZE,
+           2 * PALETTE_MAX_SIZE * sizeof(palette_mode_info.palette_colors[0]));
+    memcpy(xd->plane[1].color_index_map, best_palette_color_map,
+           rows * cols * sizeof(best_palette_color_map[0]));
+  }
+
+  return best_rd;
+}
+
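+// Fast chroma rd estimate: force DC_PRED for the uv planes and return its rd
+// cost. Used when the speed feature prefers an estimate over a full search.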
+static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+                              int *rate_tokenonly, int64_t *distortion,
+                              int *skippable, BLOCK_SIZE bsize) {
+  int64_t unused;
+
+  x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
+  super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
+                   bsize, INT64_MAX);
+  *rate = *rate_tokenonly +
+          cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED];
+  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
+}
+
+static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
+                                 PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+                                 TX_SIZE max_tx_size, int *rate_uv,
+                                 int *rate_uv_tokenonly, int64_t *dist_uv,
+                                 int *skip_uv, PREDICTION_MODE *mode_uv) {
+  // Use an estimated rd for uv_intra based on DC_PRED if the
+  // appropriate speed flag is set.
+  if (cpi->sf.use_uv_intra_rd_estimate) {
+    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+                   bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+    // Else do a proper rd search for each possible transform size that may
+    // be considered in the main rd loop.
+  } else {
+    (void)ctx;
+    rd_pick_intra_sbuv_mode(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
+                            skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
+                            max_tx_size);
+  }
+  *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
+}
+
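+// Returns the rate cost of signaling the given inter prediction mode under
+// the supplied mode context.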
+static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                       int is_compound,
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                       int16_t mode_context) {
+#if CONFIG_REF_MV
+  int mode_cost = 0;
+#if CONFIG_EXT_INTER
+  int16_t mode_ctx =
+      is_compound ? mode_context : (mode_context & NEWMV_CTX_MASK);
+#else
+  int16_t mode_ctx = mode_context & NEWMV_CTX_MASK;
+#endif  // CONFIG_EXT_INTER
+  int16_t is_all_zero_mv = mode_context & (1 << ALL_ZERO_FLAG_OFFSET);
+
+  assert(is_inter_mode(mode));
+
+#if CONFIG_EXT_INTER
+  if (is_compound) {
+    return cpi
+        ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
+  } else {
+    if (mode == NEWMV || mode == NEWFROMNEARMV) {
+#else
+  if (mode == NEWMV) {
+#endif  // CONFIG_EXT_INTER
+      mode_cost = cpi->newmv_mode_cost[mode_ctx][0];
+#if CONFIG_EXT_INTER
+      if (!is_compound)
+        mode_cost += cpi->new2mv_mode_cost[mode == NEWFROMNEARMV];
+#endif  // CONFIG_EXT_INTER
+      return mode_cost;
+    } else {
+      mode_cost = cpi->newmv_mode_cost[mode_ctx][1];
+      mode_ctx = (mode_context >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+
+      if (is_all_zero_mv) return mode_cost;
+
+      if (mode == ZEROMV) {
+        mode_cost += cpi->zeromv_mode_cost[mode_ctx][0];
+        return mode_cost;
+      } else {
+        mode_cost += cpi->zeromv_mode_cost[mode_ctx][1];
+        mode_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK;
+
+        if (mode_context & (1 << SKIP_NEARESTMV_OFFSET)) mode_ctx = 6;
+        if (mode_context & (1 << SKIP_NEARMV_OFFSET)) mode_ctx = 7;
+        if (mode_context & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) mode_ctx = 8;
+
+        mode_cost += cpi->refmv_mode_cost[mode_ctx][mode != NEARESTMV];
+        return mode_cost;
+      }
+    }
+#if CONFIG_EXT_INTER
+  }
+#endif  // CONFIG_EXT_INTER
+#else
+  assert(is_inter_mode(mode));
+#if CONFIG_EXT_INTER
+  if (is_inter_compound_mode(mode)) {
+    return cpi
+        ->inter_compound_mode_cost[mode_context][INTER_COMPOUND_OFFSET(mode)];
+  } else {
+#endif  // CONFIG_EXT_INTER
+    return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
+#if CONFIG_EXT_INTER
+  }
+#endif  // CONFIG_EXT_INTER
+#endif
+}
+
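+// Sets the motion vector(s) of sub-8x8 block i for the given mode, copies
+// them to the other 4x4 positions covered by the block, and returns the rate
+// cost of the mode plus its mv(s).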
+static int set_and_cost_bmi_mvs(
+    VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, int i, PREDICTION_MODE mode,
+    int_mv this_mv[2], int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME],
+    int_mv seg_mvs[TOTAL_REFS_PER_FRAME],
+#if CONFIG_EXT_INTER
+    int_mv compound_seg_newmvs[2],
+#endif  // CONFIG_EXT_INTER
+    int_mv *best_ref_mv[2], const int *mvjcost, int *mvcost[2]) {
+  MODE_INFO *const mic = xd->mi[0];
+  const MB_MODE_INFO *const mbmi = &mic->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+  int thismvcost = 0;
+  int idx, idy;
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
+  const int is_compound = has_second_ref(mbmi);
+  int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
+
+  switch (mode) {
+    case NEWMV:
+#if CONFIG_EXT_INTER
+    case NEWFROMNEARMV:
+#endif  // CONFIG_EXT_INTER
+      this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+#if CONFIG_EXT_INTER
+      if (!cpi->common.allow_high_precision_mv ||
+          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+        lower_mv_precision(&this_mv[0].as_mv, 0);
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_REF_MV
+      for (idx = 0; idx < 1 + is_compound; ++idx) {
+        this_mv[idx] = seg_mvs[mbmi->ref_frame[idx]];
+        vp10_set_mvcost(x, mbmi->ref_frame[idx]);
+        thismvcost +=
+            vp10_mv_bit_cost(&this_mv[idx].as_mv, &best_ref_mv[idx]->as_mv,
+                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT_SUB);
+      }
+      (void)mvjcost;
+      (void)mvcost;
+#else
+      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+#if !CONFIG_EXT_INTER
+      if (is_compound) {
+        this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+        thismvcost +=
+            vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
+                             mvcost, MV_COST_WEIGHT_SUB);
+      }
+#endif  // !CONFIG_EXT_INTER
+#endif
+      break;
+    case NEARMV:
+    case NEARESTMV:
+      this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
+      if (is_compound)
+        this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
+      break;
+    case ZEROMV:
+      this_mv[0].as_int = 0;
+      if (is_compound) this_mv[1].as_int = 0;
+      break;
+#if CONFIG_EXT_INTER
+    case NEW_NEWMV:
+      if (compound_seg_newmvs[0].as_int == INVALID_MV ||
+          compound_seg_newmvs[1].as_int == INVALID_MV) {
+        this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+        this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+      } else {
+        this_mv[0].as_int = compound_seg_newmvs[0].as_int;
+        this_mv[1].as_int = compound_seg_newmvs[1].as_int;
+      }
+      if (!cpi->common.allow_high_precision_mv ||
+          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+        lower_mv_precision(&this_mv[0].as_mv, 0);
+      if (!cpi->common.allow_high_precision_mv ||
+          !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+        lower_mv_precision(&this_mv[1].as_mv, 0);
+      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      break;
+    case NEW_NEARMV:
+    case NEW_NEARESTMV:
+      this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
+      if (!cpi->common.allow_high_precision_mv ||
+          !vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
+        lower_mv_precision(&this_mv[0].as_mv, 0);
+      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
+      break;
+    case NEAR_NEWMV:
+    case NEAREST_NEWMV:
+      this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
+      this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
+      if (!cpi->common.allow_high_precision_mv ||
+          !vp10_use_mv_hp(&best_ref_mv[1]->as_mv))
+        lower_mv_precision(&this_mv[1].as_mv, 0);
+      thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
+                                     mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+      break;
+    case NEAREST_NEARMV:
+    case NEAR_NEARESTMV:
+    case NEAREST_NEARESTMV:
+    case NEAR_NEARMV:
+      this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int;
+      this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int;
+      break;
+    case ZERO_ZEROMV:
+      this_mv[0].as_int = 0;
+      this_mv[1].as_int = 0;
+      break;
+#endif  // CONFIG_EXT_INTER
+    default: break;
+  }
+
+  mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
+  if (is_compound) mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
+
+  mic->bmi[i].as_mode = mode;
+
+#if CONFIG_REF_MV
+  if (mode == NEWMV) {
+    mic->bmi[i].pred_mv_s8[0].as_int = best_ref_mv[0]->as_int;
+    if (is_compound) mic->bmi[i].pred_mv_s8[1].as_int = best_ref_mv[1]->as_int;
+  } else {
+    mic->bmi[i].pred_mv_s8[0].as_int = this_mv[0].as_int;
+    if (is_compound) mic->bmi[i].pred_mv_s8[1].as_int = this_mv[1].as_int;
+  }
+#endif
+
+  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
+    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
+      memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
+
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+  if (is_compound)
+    mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
+  else
+#endif  // CONFIG_EXT_INTER
+    mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                          mbmi->ref_frame, mbmi->sb_type, i);
+#endif
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+  return cost_mv_ref(cpi, mode, is_compound, mode_ctx) + thismvcost;
+#else
+  return cost_mv_ref(cpi, mode, mode_ctx) + thismvcost;
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+}
+
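+// Encodes one sub-8x8 block with the current mvs: builds the inter
+// predictor, transforms and quantizes the residual, and returns the rd cost,
+// or INT64_MAX as soon as the running cost exceeds best_yrd.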
+static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+                                       int64_t best_yrd, int i, int *labelyrate,
+                                       int64_t *distortion, int64_t *sse,
+                                       ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
+                                       int ir, int ic, int mi_row, int mi_col) {
+  int k;
+  MACROBLOCKD *xd = &x->e_mbd;
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  struct macroblock_plane *const p = &x->plane[0];
+  MODE_INFO *const mi = xd->mi[0];
+  const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd);
+  const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+  const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
+  int idx, idy;
+  const uint8_t *const src =
+      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+  uint8_t *const dst =
+      &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+  int64_t thisdistortion = 0, thissse = 0;
+  int thisrate = 0;
+  TX_SIZE tx_size = mi->mbmi.tx_size;
+
+  TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i, tx_size);
+  const scan_order *so = get_scan(tx_size, tx_type, 1);
+  const int num_4x4_w = num_4x4_blocks_wide_txsize_lookup[tx_size];
+  const int num_4x4_h = num_4x4_blocks_high_txsize_lookup[tx_size];
+
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+  assert(IMPLIES(xd->lossless[mi->mbmi.segment_id], tx_size == TX_4X4));
+  assert(IMPLIES(!xd->lossless[mi->mbmi.segment_id],
+                 tx_size == max_txsize_rect_lookup[mi->mbmi.sb_type]));
+#else
+  assert(tx_size == TX_4X4);
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+  assert(tx_type == DCT_DCT);
+
+  vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vpx_highbd_subtract_block(
+        height, width,
+        vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
+        p->src.stride, dst, pd->dst.stride, xd->bd);
+  } else {
+    vpx_subtract_block(height, width, vp10_raster_block_offset_int16(
+                                          BLOCK_8X8, i, p->src_diff),
+                       8, src, p->src.stride, dst, pd->dst.stride);
+  }
+#else
+  vpx_subtract_block(height, width,
+                     vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+                     8, src, p->src.stride, dst, pd->dst.stride);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
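+  // Accumulate rate, distortion and sse over the transform blocks covering
+  // this prediction block, bailing out early once the running rd cost
+  // exceeds best_yrd.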
+  k = i;
+  for (idy = 0; idy < height / 4; idy += num_4x4_h) {
+    for (idx = 0; idx < width / 4; idx += num_4x4_w) {
+      int64_t dist, ssz, rd, rd1, rd2;
+      int block;
+      int coeff_ctx;
+      k += (idy * 2 + idx);
+      if (tx_size == TX_4X4)
+        block = k;
+      else
+        block = (i ? 2 : 0);
+      coeff_ctx = combine_entropy_contexts(*(ta + (k & 1)), *(tl + (k >> 1)));
+#if CONFIG_NEW_QUANT
+      vp10_xform_quant_fp_nuq(x, 0, block, idy + (i >> 1), idx + (i & 0x01),
+                              BLOCK_8X8, tx_size, coeff_ctx);
+#else
+      vp10_xform_quant(x, 0, block, idy + (i >> 1), idx + (i & 0x01), BLOCK_8X8,
+                       tx_size, VP10_XFORM_QUANT_FP);
+#endif  // CONFIG_NEW_QUANT
+      if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
+        vp10_optimize_b(x, 0, block, tx_size, coeff_ctx);
+      dist_block(cpi, x, 0, block, idy + (i >> 1), idx + (i & 0x1), tx_size,
+                 &dist, &ssz);
+      thisdistortion += dist;
+      thissse += ssz;
+#if CONFIG_VAR_TX
+      thisrate += cost_coeffs(x, 0, block, coeff_ctx, tx_size, so->scan,
+                              so->neighbors, cpi->sf.use_fast_coef_costing);
+      *(ta + (k & 1)) = !(p->eobs[block] == 0);
+      *(tl + (k >> 1)) = !(p->eobs[block] == 0);
+#else
+      thisrate +=
+          cost_coeffs(x, 0, block, ta + (k & 1), tl + (k >> 1), tx_size,
+                      so->scan, so->neighbors, cpi->sf.use_fast_coef_costing);
+#if CONFIG_EXT_TX
+      if (tx_size == TX_8X4) {
+        *(ta + (k & 1) + 1) = *(ta + (k & 1));
+      }
+      if (tx_size == TX_4X8) {
+        *(tl + (k >> 1) + 1) = *(tl + (k >> 1));
+      }
+#endif  // CONFIG_EXT_TX
+#endif  // CONFIG_VAR_TX
+      rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion);
+      rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse);
+      rd = VPXMIN(rd1, rd2);
+      if (rd >= best_yrd) return INT64_MAX;
+    }
+  }
+
+  *distortion = thisdistortion;
+  *labelyrate = thisrate;
+  *sse = thissse;
+
+  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
+}
+
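+// Per-block rd statistics kept for each candidate mode during the sub-8x8
+// mode search.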
+typedef struct {
+  int eobs;
+  int brate;
+  int byrate;
+  int64_t bdist;
+  int64_t bsse;
+  int64_t brdcost;
+  int_mv mvs[2];
+#if CONFIG_REF_MV
+  int_mv pred_mv[2];
+#endif
+#if CONFIG_EXT_INTER
+  int_mv ref_mv[2];
+#endif  // CONFIG_EXT_INTER
+  ENTROPY_CONTEXT ta[2];
+  ENTROPY_CONTEXT tl[2];
+} SEG_RDSTAT;
+
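+// Aggregate results of the sub-8x8 mode search across all four labels.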
+typedef struct {
+  int_mv *ref_mv[2];
+  int_mv mvp;
+
+  int64_t segment_rd;
+  int r;
+  int64_t d;
+  int64_t sse;
+  int segment_yrate;
+  PREDICTION_MODE modes[4];
+#if CONFIG_EXT_INTER
+  SEG_RDSTAT rdstat[4][INTER_MODES + INTER_COMPOUND_MODES];
+#else
+  SEG_RDSTAT rdstat[4][INTER_MODES];
+#endif  // CONFIG_EXT_INTER
+  int mvthresh;
+} BEST_SEG_INFO;
+
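+// Returns nonzero if the full-pel position of the mv lies outside the
+// current motion search bounds.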
+static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
+  return (mv->row >> 3) < x->mv_row_min || (mv->row >> 3) > x->mv_row_max ||
+         (mv->col >> 3) < x->mv_col_min || (mv->col >> 3) > x->mv_col_max;
+}
+
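+// Advances the source and prediction buffer pointers to sub-8x8 block i;
+// mi_buf_restore() undoes the shift.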
+static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
+  MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi;
+  struct macroblock_plane *const p = &x->plane[0];
+  struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
+
+  p->src.buf =
+      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+  assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
+  pd->pre[0].buf =
+      &pd->pre[0]
+           .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
+  if (has_second_ref(mbmi))
+    pd->pre[1].buf =
+        &pd->pre[1]
+             .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
+}
+
+static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
+                                  struct buf_2d orig_pre[2]) {
+  MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
+  x->plane[0].src = orig_src;
+  x->e_mbd.plane[0].pre[0] = orig_pre[0];
+  if (has_second_ref(mbmi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
+}
+
+// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
+// TODO(aconverse): Find out if this is still productive, then clean up or
+// remove it.
+static int check_best_zero_mv(
+    const VP10_COMP *cpi, const int16_t mode_context[TOTAL_REFS_PER_FRAME],
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    const int16_t compound_mode_context[TOTAL_REFS_PER_FRAME],
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+    int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME], int this_mode,
+    const MV_REFERENCE_FRAME ref_frames[2], const BLOCK_SIZE bsize, int block) {
+#if !CONFIG_EXT_INTER
+  assert(ref_frames[1] != INTRA_FRAME);  // Just sanity check
+#endif
+
+  if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
+      frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
+      (ref_frames[1] <= INTRA_FRAME ||
+       frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
+#if CONFIG_REF_MV
+    int16_t rfc =
+        vp10_mode_context_analyzer(mode_context, ref_frames, bsize, block);
+#else
+    int16_t rfc = mode_context[ref_frames[0]];
+#endif
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    int c1 = cost_mv_ref(cpi, NEARMV, ref_frames[1] > INTRA_FRAME, rfc);
+    int c2 = cost_mv_ref(cpi, NEARESTMV, ref_frames[1] > INTRA_FRAME, rfc);
+    int c3 = cost_mv_ref(cpi, ZEROMV, ref_frames[1] > INTRA_FRAME, rfc);
+#else
+    int c1 = cost_mv_ref(cpi, NEARMV, rfc);
+    int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
+    int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+
+#if !CONFIG_REF_MV
+    (void)bsize;
+    (void)block;
+#endif
+
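+    // Prune this mode if signaling the zero motion through it costs more
+    // than signaling the same motion through a cheaper mode.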
+    if (this_mode == NEARMV) {
+      if (c1 > c3) return 0;
+    } else if (this_mode == NEARESTMV) {
+      if (c2 > c3) return 0;
+    } else {
+      assert(this_mode == ZEROMV);
+      if (ref_frames[1] <= INTRA_FRAME) {
+        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
+            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
+          return 0;
+      } else {
+        if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
+             frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
+            (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
+             frame_mv[NEARMV][ref_frames[1]].as_int == 0))
+          return 0;
+      }
+    }
+  }
+#if CONFIG_EXT_INTER
+  else if ((this_mode == NEAREST_NEARESTMV || this_mode == NEAREST_NEARMV ||
+            this_mode == NEAR_NEARESTMV || this_mode == NEAR_NEARMV ||
+            this_mode == ZERO_ZEROMV) &&
+           frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
+           frame_mv[this_mode][ref_frames[1]].as_int == 0) {
+#if CONFIG_REF_MV
+    int16_t rfc = compound_mode_context[ref_frames[0]];
+    int c1 = cost_mv_ref(cpi, NEAREST_NEARMV, 1, rfc);
+    int c2 = cost_mv_ref(cpi, NEAREST_NEARESTMV, 1, rfc);
+    int c3 = cost_mv_ref(cpi, ZERO_ZEROMV, 1, rfc);
+    int c4 = cost_mv_ref(cpi, NEAR_NEARESTMV, 1, rfc);
+    int c5 = cost_mv_ref(cpi, NEAR_NEARMV, 1, rfc);
+#else
+    int16_t rfc = mode_context[ref_frames[0]];
+    int c1 = cost_mv_ref(cpi, NEAREST_NEARMV, rfc);
+    int c2 = cost_mv_ref(cpi, NEAREST_NEARESTMV, rfc);
+    int c3 = cost_mv_ref(cpi, ZERO_ZEROMV, rfc);
+    int c4 = cost_mv_ref(cpi, NEAR_NEARESTMV, rfc);
+    int c5 = cost_mv_ref(cpi, NEAR_NEARMV, rfc);
+#endif
+
+    if (this_mode == NEAREST_NEARMV) {
+      if (c1 > c3) return 0;
+    } else if (this_mode == NEAREST_NEARESTMV) {
+      if (c2 > c3) return 0;
+    } else if (this_mode == NEAR_NEARESTMV) {
+      if (c4 > c3) return 0;
+    } else if (this_mode == NEAR_NEARMV) {
+      if (c5 > c3) return 0;
+    } else {
+      assert(this_mode == ZERO_ZEROMV);
+      if ((c3 >= c2 && frame_mv[NEAREST_NEARESTMV][ref_frames[0]].as_int == 0 &&
+           frame_mv[NEAREST_NEARESTMV][ref_frames[1]].as_int == 0) ||
+          (c3 >= c1 && frame_mv[NEAREST_NEARMV][ref_frames[0]].as_int == 0 &&
+           frame_mv[NEAREST_NEARMV][ref_frames[1]].as_int == 0) ||
+          (c3 >= c5 && frame_mv[NEAR_NEARMV][ref_frames[0]].as_int == 0 &&
+           frame_mv[NEAR_NEARMV][ref_frames[1]].as_int == 0) ||
+          (c3 >= c4 && frame_mv[NEAR_NEARESTMV][ref_frames[0]].as_int == 0 &&
+           frame_mv[NEAR_NEARESTMV][ref_frames[1]].as_int == 0))
+        return 0;
+    }
+  }
+#endif  // CONFIG_EXT_INTER
+  return 1;
+}
+
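+// Iteratively refines the mv pair of a compound prediction by searching one
+// reference frame at a time while holding the predictor built from the other
+// reference fixed.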
+static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+                                int_mv *frame_mv, int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+                                int_mv *ref_mv_sub8x8[2],
+#endif
+                                int_mv single_newmv[TOTAL_REFS_PER_FRAME],
+                                int *rate_mv, const int block) {
+  const VP10_COMMON *const cm = &cpi->common;
+  const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
+  const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  const int refs[2] = { mbmi->ref_frame[0],
+                        mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
+  int_mv ref_mv[2];
+  int ite, ref;
+#if CONFIG_DUAL_FILTER
+  INTERP_FILTER interp_filter[4] = {
+    mbmi->interp_filter[0], mbmi->interp_filter[1], mbmi->interp_filter[2],
+    mbmi->interp_filter[3],
+  };
+#else
+  const INTERP_FILTER interp_filter = mbmi->interp_filter;
+#endif
+  struct scale_factors sf;
+
+  // Do joint motion search in compound mode to get more accurate mv.
+  struct buf_2d backup_yv12[2][MAX_MB_PLANE];
+  int last_besterr[2] = { INT_MAX, INT_MAX };
+  const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
+    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
+    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
+  };
+
+// Prediction buffer from second frame.
+#if CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
+  uint8_t *second_pred;
+#else
+  DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  for (ref = 0; ref < 2; ++ref) {
+#if CONFIG_EXT_INTER
+    if (bsize < BLOCK_8X8 && ref_mv_sub8x8 != NULL)
+      ref_mv[ref].as_int = ref_mv_sub8x8[ref]->as_int;
+    else
+#endif  // CONFIG_EXT_INTER
+      ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
+
+    if (scaled_ref_frame[ref]) {
+      int i;
+      // Swap out the reference frame for a version that's been scaled to
+      // match the resolution of the current frame, allowing the existing
+      // motion search code to be used without additional modifications.
+      for (i = 0; i < MAX_MB_PLANE; i++)
+        backup_yv12[ref][i] = xd->plane[i].pre[ref];
+      vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
+                            NULL);
+    }
+
+    frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
+  }
+
+// Since we have scaled the reference frames to match the size of the current
+// frame we must use a unit scaling factor during mode selection.
+#if CONFIG_VP9_HIGHBITDEPTH
+  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                     cm->height, cm->use_highbitdepth);
+#else
+  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+                                     cm->height);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // Run the joint motion search iteratively, alternating between the two
+  // reference frames, and break out of the loop if no better mv is found.
+  for (ite = 0; ite < 4; ite++) {
+    struct buf_2d ref_yv12[2];
+    int bestsme = INT_MAX;
+    int sadpb = x->sadperbit16;
+    MV *const best_mv = &x->best_mv.as_mv;
+    int search_range = 3;
+
+    int tmp_col_min = x->mv_col_min;
+    int tmp_col_max = x->mv_col_max;
+    int tmp_row_min = x->mv_row_min;
+    int tmp_row_max = x->mv_row_max;
+    int id = ite % 2;  // Even iterations search in the first reference frame,
+                       // odd iterations search in the second. The predictor
+                       // found for the 'other' reference frame is factored in.
+
+    // Initialized here because of a compiler problem in Visual Studio.
+    ref_yv12[0] = xd->plane[0].pre[0];
+    ref_yv12[1] = xd->plane[0].pre[1];
+
+#if CONFIG_DUAL_FILTER
+    // reload the filter types
+    interp_filter[0] =
+        (id == 0) ? mbmi->interp_filter[2] : mbmi->interp_filter[0];
+    interp_filter[1] =
+        (id == 0) ? mbmi->interp_filter[3] : mbmi->interp_filter[1];
+#endif
+
+// Get the prediction block from the 'other' reference frame.
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
+      vp10_highbd_build_inter_predictor(
+          ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
+          &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, interp_filter,
+          MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
+    } else {
+      second_pred = (uint8_t *)second_pred_alloc_16;
+      vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                                 second_pred, pw, &frame_mv[refs[!id]].as_mv,
+                                 &sf, pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+                                 mi_col * MI_SIZE, mi_row * MI_SIZE);
+    }
+#else
+    vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+                               second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+                               pw, ph, 0, interp_filter, MV_PRECISION_Q3,
+                               mi_col * MI_SIZE, mi_row * MI_SIZE);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+    // Do compound motion search on the current reference frame.
+    if (id) xd->plane[0].pre[0] = ref_yv12[id];
+    vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
+
+    // Use the mv result from the single-reference mode as the mv predictor.
+    *best_mv = frame_mv[refs[id]].as_mv;
+
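+    // The predictor is in 1/8-pel units; convert it to full-pel units for
+    // the full-pixel refining search below.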
+    best_mv->col >>= 3;
+    best_mv->row >>= 3;
+
+#if CONFIG_REF_MV
+    vp10_set_mvcost(x, refs[id]);
+#endif
+
+    // Small-range full-pixel motion search.
+    bestsme =
+        vp10_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
+                                  &ref_mv[id].as_mv, second_pred);
+    if (bestsme < INT_MAX)
+      bestsme = vp10_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
+                                       second_pred, &cpi->fn_ptr[bsize], 1);
+
+    x->mv_col_min = tmp_col_min;
+    x->mv_col_max = tmp_col_max;
+    x->mv_row_min = tmp_row_min;
+    x->mv_row_max = tmp_row_max;
+
+    if (bestsme < INT_MAX) {
+      int dis; /* TODO: use dis in distortion calculation later. */
+      unsigned int sse;
+      if (cpi->sf.use_upsampled_references) {
+        // Use up-sampled reference frames.
+        struct macroblockd_plane *const pd = &xd->plane[0];
+        struct buf_2d backup_pred = pd->pre[0];
+        const YV12_BUFFER_CONFIG *upsampled_ref =
+            get_upsampled_ref(cpi, refs[id]);
+
+        // Set pred for Y plane
+        setup_pred_plane(&pd->pre[0], upsampled_ref->y_buffer,
+                         upsampled_ref->y_crop_width,
+                         upsampled_ref->y_crop_height, upsampled_ref->y_stride,
+                         (mi_row << 3), (mi_col << 3), NULL, pd->subsampling_x,
+                         pd->subsampling_y);
+
+        // If bsize < BLOCK_8X8, adjust pred pointer for this block
+        if (bsize < BLOCK_8X8)
+          pd->pre[0].buf =
+              &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, block,
+                                                        pd->pre[0].stride))
+                              << 3];
+
+        bestsme = cpi->find_fractional_mv_step(
+            x, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
+            x->errorperbit, &cpi->fn_ptr[bsize], 0,
+            cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
+            &dis, &sse, second_pred, pw, ph, 1);
+
+        // Restore the reference frames.
+        pd->pre[0] = backup_pred;
+      } else {
+        (void)block;
+        bestsme = cpi->find_fractional_mv_step(
+            x, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
+            x->errorperbit, &cpi->fn_ptr[bsize], 0,
+            cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
+            &dis, &sse, second_pred, pw, ph, 0);
+      }
+    }
+
+    // Restore the pointer to the first (possibly scaled) prediction buffer.
+    if (id) xd->plane[0].pre[0] = ref_yv12[0];
+
+    if (bestsme < last_besterr[id]) {
+      frame_mv[refs[id]].as_mv = *best_mv;
+      last_besterr[id] = bestsme;
+    } else {
+      break;
+    }
+  }
+
+  *rate_mv = 0;
+
+  for (ref = 0; ref < 2; ++ref) {
+    if (scaled_ref_frame[ref]) {
+      // Restore the prediction frame pointers to their unscaled versions.
+      int i;
+      for (i = 0; i < MAX_MB_PLANE; i++)
+        xd->plane[i].pre[ref] = backup_yv12[ref][i];
+    }
+#if CONFIG_REF_MV
+    vp10_set_mvcost(x, refs[ref]);
+#endif
+#if CONFIG_EXT_INTER
+    if (bsize >= BLOCK_8X8)
+#endif  // CONFIG_EXT_INTER
+      *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
+                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+#if CONFIG_EXT_INTER
+    else
+      *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+                                   &ref_mv_sub8x8[ref]->as_mv, x->nmvjointcost,
+                                   x->mvcost, MV_COST_WEIGHT);
+#endif  // CONFIG_EXT_INTER
+  }
+}
+
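+// Searches the per-label inter modes of a sub-8x8 partition and returns the
+// best rd cost found, filling bsi with the winning configuration.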
+static int64_t rd_pick_best_sub8x8_mode(
+    VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+    int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
+    int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
+    int mvthresh,
+#if CONFIG_EXT_INTER
+    int_mv seg_mvs[4][2][TOTAL_REFS_PER_FRAME],
+    int_mv compound_seg_newmvs[4][2],
+#else
+    int_mv seg_mvs[4][TOTAL_REFS_PER_FRAME],
+#endif  // CONFIG_EXT_INTER
+    BEST_SEG_INFO *bsi_buf, int filter_idx, int mi_row, int mi_col) {
+  BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
+#if CONFIG_REF_MV
+  int_mv tmp_ref_mv[2];
+#endif
+  MACROBLOCKD *xd = &x->e_mbd;
+  MODE_INFO *mi = xd->mi[0];
+  MB_MODE_INFO *mbmi = &mi->mbmi;
+  int mode_idx;
+  int k, br = 0, idx, idy;
+  int64_t bd = 0, block_sse = 0;
+  PREDICTION_MODE this_mode;
+  VP10_COMMON *cm = &cpi->common;
+  struct macroblock_plane *const p = &x->plane[0];
+  struct macroblockd_plane *const pd = &xd->plane[0];
+  const int label_count = 4;
+  int64_t this_segment_rd = 0;
+  int label_mv_thresh;
+  int segmentyrate = 0;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+  ENTROPY_CONTEXT t_above[2], t_left[2];
+  int subpelmv = 1, have_ref = 0;
+  const int has_second_rf = has_second_ref(mbmi);
+  const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+#if CONFIG_EXT_TX && CONFIG_RECT_TX
+  mbmi->tx_size =
+      xd->lossless[mbmi->segment_id] ? TX_4X4 : max_txsize_rect_lookup[bsize];
+#else
+  mbmi->tx_size = TX_4X4;
+#endif  // CONFIG_EXT_TX && CONFIG_RECT_TX
+
+  vp10_zero(*bsi);
+
+  bsi->segment_rd = best_rd;
+  bsi->ref_mv[0] = best_ref_mv;
+  bsi->ref_mv[1] = second_best_ref_mv;
+  bsi->mvp.as_int = best_ref_mv->as_int;
+  bsi->mvthresh = mvthresh;
+
+  for (idx = 0; idx < 4; ++idx) bsi->modes[idx] = ZEROMV;
+
+#if CONFIG_REF_MV
+  for (idx = 0; idx < 4; ++idx) {
+    for (k = NEARESTMV; k <= NEWMV; ++k) {
+      bsi->rdstat[idx][INTER_OFFSET(k)].pred_mv[0].as_int = INVALID_MV;
+      bsi->rdstat[idx][INTER_OFFSET(k)].pred_mv[1].as_int = INVALID_MV;
+
+      bsi->rdstat[idx][INTER_OFFSET(k)].mvs[0].as_int = INVALID_MV;
+      bsi->rdstat[idx][INTER_OFFSET(k)].mvs[1].as_int = INVALID_MV;
+    }
+  }
+#endif
+
+  memcpy(t_above, pd->above_context, sizeof(t_above));
+  memcpy(t_left, pd->left_context, sizeof(t_left));
+
+  // A multiplier of 64 makes this threshold effectively so large that mvs
+  // are very rarely checked on segments; setting it to 1 makes the mv
+  // threshold roughly equal to what it is for macroblocks.
+  label_mv_thresh = 1 * bsi->mvthresh / label_count;
+
+  // Segmentation method overheads
+  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+      // TODO(jingning,rbultje): rewrite the rate-distortion optimization
+      // loop for 4x4/4x8/8x4 block coding; to be replaced with a new rd loop.
+      int_mv mode_mv[MB_MODE_COUNT][2];
+      int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+      PREDICTION_MODE mode_selected = ZEROMV;
+      int64_t best_rd = INT64_MAX;
+      const int i = idy * 2 + idx;
+      int ref;
+#if CONFIG_REF_MV
+      CANDIDATE_MV ref_mv_stack[2][MAX_REF_MV_STACK_SIZE];
+      uint8_t ref_mv_count[2];
+#endif
+#if CONFIG_EXT_INTER
+      int mv_idx;
+      int_mv ref_mvs_sub8x8[2][2];
+#endif  // CONFIG_EXT_INTER
+
+      for (ref = 0; ref < 1 + has_second_rf; ++ref) {
+        const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+#if CONFIG_EXT_INTER
+        int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
+        vp10_update_mv_context(xd, mi, frame, mv_ref_list, i, mi_row, mi_col,
+                               NULL);
+#endif  // CONFIG_EXT_INTER
+        frame_mv[ZEROMV][frame].as_int = 0;
+        vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
+#if CONFIG_REF_MV
+                                       ref_mv_stack[ref], &ref_mv_count[ref],
+#endif
+#if CONFIG_EXT_INTER
+                                       mv_ref_list,
+#endif  // CONFIG_EXT_INTER
+                                       &frame_mv[NEARESTMV][frame],
+                                       &frame_mv[NEARMV][frame]);
+
+#if CONFIG_REF_MV
+        tmp_ref_mv[ref] = frame_mv[NEARESTMV][mbmi->ref_frame[ref]];
+        lower_mv_precision(&tmp_ref_mv[ref].as_mv, cm->allow_high_precision_mv);
+        bsi->ref_mv[ref] = &tmp_ref_mv[ref];
+        mbmi_ext->ref_mvs[frame][0] = tmp_ref_mv[ref];
+#endif
+
+#if CONFIG_EXT_INTER
+        mv_ref_list[0].as_int = frame_mv[NEARESTMV][frame].as_int;
+        mv_ref_list[1].as_int = frame_mv[NEARMV][frame].as_int;
+        vp10_find_best_ref_mvs(cm->allow_high_precision_mv, mv_ref_list,
+                               &ref_mvs_sub8x8[0][ref],
+                               &ref_mvs_sub8x8[1][ref]);
+
+        if (has_second_rf) {
+          frame_mv[ZERO_ZEROMV][frame].as_int = 0;
+          frame_mv[NEAREST_NEARESTMV][frame].as_int =
+              frame_mv[NEARESTMV][frame].as_int;
+
+          if (ref == 0) {
+            frame_mv[NEAREST_NEARMV][frame].as_int =
+                frame_mv[NEARESTMV][frame].as_int;
+            frame_mv[NEAR_NEARESTMV][frame].as_int =
+                frame_mv[NEARMV][frame].as_int;
+            frame_mv[NEAREST_NEWMV][frame].as_int =
+                frame_mv[NEARESTMV][frame].as_int;
+            frame_mv[NEAR_NEWMV][frame].as_int = frame_mv[NEARMV][frame].as_int;
+            frame_mv[NEAR_NEARMV][frame].as_int =
+                frame_mv[NEARMV][frame].as_int;
+          } else if (ref == 1) {
+            frame_mv[NEAREST_NEARMV][frame].as_int =
+                frame_mv[NEARMV][frame].as_int;
+            frame_mv[NEAR_NEARESTMV][frame].as_int =
+                frame_mv[NEARESTMV][frame].as_int;
+            frame_mv[NEW_NEARESTMV][frame].as_int =
+                frame_mv[NEARESTMV][frame].as_int;
+            frame_mv[NEW_NEARMV][frame].as_int = frame_mv[NEARMV][frame].as_int;
+            frame_mv[NEAR_NEARMV][frame].as_int =
+                frame_mv[NEARMV][frame].as_int;
+          }
+        }
+#endif  // CONFIG_EXT_INTER
+      }
+
+// search for the best motion vector on this segment
+#if CONFIG_EXT_INTER
+      for (this_mode = (has_second_rf ? NEAREST_NEARESTMV : NEARESTMV);
+           this_mode <= (has_second_rf ? NEW_NEWMV : NEWFROMNEARMV);
+           ++this_mode)
+#else
+      for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode)
+#endif  // CONFIG_EXT_INTER
+      {
+        const struct buf_2d orig_src = x->plane[0].src;
+        struct buf_2d orig_pre[2];
+        // This flag controls whether motion estimation will kick off: when
+        // it is set to a non-zero value, the encoder will force motion
+        // estimation.
+        int run_mv_search = 0;
+
+        mode_idx = INTER_OFFSET(this_mode);
+#if CONFIG_EXT_INTER
+        mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
+
+        for (ref = 0; ref < 1 + has_second_rf; ++ref)
+          bsi->ref_mv[ref]->as_int = ref_mvs_sub8x8[mv_idx][ref].as_int;
+#endif  // CONFIG_EXT_INTER
+        bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
+        if (!(inter_mode_mask & (1 << this_mode))) continue;
+
+#if CONFIG_REF_MV
+        run_mv_search = 2;
+#if !CONFIG_EXT_INTER
+        if (filter_idx > 0 && this_mode == NEWMV) {
+          BEST_SEG_INFO *ref_bsi = bsi_buf;
+          SEG_RDSTAT *ref_rdstat = &ref_bsi->rdstat[i][mode_idx];
+
+          if (has_second_rf) {
+            if (seg_mvs[i][mbmi->ref_frame[0]].as_int ==
+                    ref_rdstat->mvs[0].as_int &&
+                ref_rdstat->mvs[0].as_int != INVALID_MV)
+              if (bsi->ref_mv[0]->as_int == ref_rdstat->pred_mv[0].as_int)
+                --run_mv_search;
+
+            if (seg_mvs[i][mbmi->ref_frame[1]].as_int ==
+                    ref_rdstat->mvs[1].as_int &&
+                ref_rdstat->mvs[1].as_int != INVALID_MV)
+              if (bsi->ref_mv[1]->as_int == ref_rdstat->pred_mv[1].as_int)
+                --run_mv_search;
+          } else {
+            if (bsi->ref_mv[0]->as_int == ref_rdstat->pred_mv[0].as_int &&
+                ref_rdstat->mvs[0].as_int != INVALID_MV) {
+              run_mv_search = 0;
+              seg_mvs[i][mbmi->ref_frame[0]].as_int = ref_rdstat->mvs[0].as_int;
+            }
+          }
+
+          if (run_mv_search != 0 && filter_idx > 1) {
+            ref_bsi = bsi_buf + 1;
+            ref_rdstat = &ref_bsi->rdstat[i][mode_idx];
+            run_mv_search = 2;
+
+            if (has_second_rf) {
+              if (seg_mvs[i][mbmi->ref_frame[0]].as_int ==
+                      ref_rdstat->mvs[0].as_int &&
+                  ref_rdstat->mvs[0].as_int != INVALID_MV)
+                if (bsi->ref_mv[0]->as_int == ref_rdstat->pred_mv[0].as_int)
+                  --run_mv_search;
+
+              if (seg_mvs[i][mbmi->ref_frame[1]].as_int ==
+                      ref_rdstat->mvs[1].as_int &&
+                  ref_rdstat->mvs[1].as_int != INVALID_MV)
+                if (bsi->ref_mv[1]->as_int == ref_rdstat->pred_mv[1].as_int)
+                  --run_mv_search;
+            } else {
+              if (bsi->ref_mv[0]->as_int == ref_rdstat->pred_mv[0].as_int &&
+                  ref_rdstat->mvs[0].as_int != INVALID_MV) {
+                run_mv_search = 0;
+                seg_mvs[i][mbmi->ref_frame[0]].as_int =
+                    ref_rdstat->mvs[0].as_int;
+              }
+            }
+          }
+        }
+#endif  // CONFIG_EXT_INTER
+#endif  // CONFIG_REF_MV
+
+        if (!check_best_zero_mv(cpi, mbmi_ext->mode_context,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                                mbmi_ext->compound_mode_context,
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                                frame_mv, this_mode, mbmi->ref_frame, bsize, i))
+          continue;
+
+        memcpy(orig_pre, pd->pre, sizeof(orig_pre));
+        memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
+               sizeof(bsi->rdstat[i][mode_idx].ta));
+        memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
+               sizeof(bsi->rdstat[i][mode_idx].tl));
+
+        // motion search for newmv (single predictor case only)
+        if (!has_second_rf &&
+#if CONFIG_EXT_INTER
+            have_newmv_in_inter_mode(this_mode) &&
+            (seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV ||
+             vp10_use_mv_hp(&bsi->ref_mv[0]->as_mv) == 0)
+#else
+            this_mode == NEWMV &&
+            (seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV ||
+             run_mv_search)
+#endif  // CONFIG_EXT_INTER
+                ) {
+          int step_param = 0;
+          int bestsme = INT_MAX;
+          int sadpb = x->sadperbit4;
+          MV mvp_full;
+          int max_mv;
+          int cost_list[5];
+          int tmp_col_min = x->mv_col_min;
+          int tmp_col_max = x->mv_col_max;
+          int tmp_row_min = x->mv_row_min;
+          int tmp_row_max = x->mv_row_max;
+
+          /* Is the best so far sufficiently good that we can't justify
+           * doing a new motion search? */
+          if (best_rd < label_mv_thresh) break;
+
+          if (cpi->oxcf.mode != BEST) {
+#if CONFIG_EXT_INTER
+            bsi->mvp.as_int = bsi->ref_mv[0]->as_int;
+#else
+// use previous block's result as next block's MV predictor.
+#if !CONFIG_REF_MV
+            if (i > 0) {
+              bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
+              if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+            }
+#endif
+#endif  // CONFIG_EXT_INTER
+          }
+          if (i == 0)
+            max_mv = x->max_mv_context[mbmi->ref_frame[0]];
+          else
+            max_mv =
+                VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
+
+          if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
+            // Take a weighted average of the step_params based on the last
+            // frame's max mv magnitude and the best ref mvs of the current
+            // block for the given reference.
+            step_param =
+                (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
+          } else {
+            step_param = cpi->mv_step_param;
+          }
+
+#if CONFIG_REF_MV
+          mvp_full.row = bsi->ref_mv[0]->as_mv.row >> 3;
+          mvp_full.col = bsi->ref_mv[0]->as_mv.col >> 3;
+#else
+          mvp_full.row = bsi->mvp.as_mv.row >> 3;
+          mvp_full.col = bsi->mvp.as_mv.col >> 3;
+#endif
+
+          if (cpi->sf.adaptive_motion_search) {
+            mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3;
+            mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3;
+            step_param = VPXMAX(step_param, 8);
+          }
+
+          // adjust src pointer for this block
+          mi_buf_shift(x, i);
+
+          vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
+
+          x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
+
+#if CONFIG_REF_MV
+          vp10_set_mvcost(x, mbmi->ref_frame[0]);
+#endif
+          bestsme = vp10_full_pixel_search(
+              cpi, x, bsize, &mvp_full, step_param, sadpb,
+              cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
+              &bsi->ref_mv[0]->as_mv, INT_MAX, 1);
+
+          x->mv_col_min = tmp_col_min;
+          x->mv_col_max = tmp_col_max;
+          x->mv_row_min = tmp_row_min;
+          x->mv_row_max = tmp_row_max;
+
+          if (bestsme < INT_MAX) {
+            int distortion;
+            if (cpi->sf.use_upsampled_references) {
+              int best_mv_var;
+              const int try_second =
+                  x->second_best_mv.as_int != INVALID_MV &&
+                  x->second_best_mv.as_int != x->best_mv.as_int;
+              const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
+              const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
+              // Use up-sampled reference frames.
+              struct macroblockd_plane *const pd = &xd->plane[0];
+              struct buf_2d backup_pred = pd->pre[0];
+              const YV12_BUFFER_CONFIG *upsampled_ref =
+                  get_upsampled_ref(cpi, mbmi->ref_frame[0]);
+
+              // Set pred for Y plane
+              setup_pred_plane(
+                  &pd->pre[0], upsampled_ref->y_buffer,
+                  upsampled_ref->y_crop_width, upsampled_ref->y_crop_height,
+                  upsampled_ref->y_stride, (mi_row << 3), (mi_col << 3), NULL,
+                  pd->subsampling_x, pd->subsampling_y);
+
+              // adjust pred pointer for this block
+              pd->pre[0].buf =
+                  &pd->pre[0].buf[(vp10_raster_block_offset(BLOCK_8X8, i,
+                                                            pd->pre[0].stride))
+                                  << 3];
+
+              best_mv_var = cpi->find_fractional_mv_step(
+                  x, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
+                  x->errorperbit, &cpi->fn_ptr[bsize],
+                  cpi->sf.mv.subpel_force_stop,
+                  cpi->sf.mv.subpel_iters_per_step,
+                  cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
+                  &distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, pw, ph,
+                  1);
+
+              if (try_second) {
+                int this_var;
+                MV best_mv = x->best_mv.as_mv;
+                const MV ref_mv = bsi->ref_mv[0]->as_mv;
+                const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+                const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+                const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+                const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+
+                x->best_mv = x->second_best_mv;
+                if (x->best_mv.as_mv.row * 8 <= maxr &&
+                    x->best_mv.as_mv.row * 8 >= minr &&
+                    x->best_mv.as_mv.col * 8 <= maxc &&
+                    x->best_mv.as_mv.col * 8 >= minc) {
+                  this_var = cpi->find_fractional_mv_step(
+                      x, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
+                      x->errorperbit, &cpi->fn_ptr[bsize],
+                      cpi->sf.mv.subpel_force_stop,
+                      cpi->sf.mv.subpel_iters_per_step,
+                      cond_cost_list(cpi, cost_list), x->nmvjointcost,
+                      x->mvcost, &distortion, &x->pred_sse[mbmi->ref_frame[0]],
+                      NULL, pw, ph, 1);
+                  if (this_var < best_mv_var) best_mv = x->best_mv.as_mv;
+                  x->best_mv.as_mv = best_mv;
+                }
+              }
+
+              // Restore the reference frames.
+              pd->pre[0] = backup_pred;
+            } else {
+              cpi->find_fractional_mv_step(
+                  x, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
+                  x->errorperbit, &cpi->fn_ptr[bsize],
+                  cpi->sf.mv.subpel_force_stop,
+                  cpi->sf.mv.subpel_iters_per_step,
+                  cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
+                  &distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, 0, 0, 0);
+            }
+
+// save motion search result for use in compound prediction
+#if CONFIG_EXT_INTER
+            seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_mv = x->best_mv.as_mv;
+#else
+            seg_mvs[i][mbmi->ref_frame[0]].as_mv = x->best_mv.as_mv;
+#endif  // CONFIG_EXT_INTER
+          }
+
+          if (cpi->sf.adaptive_motion_search)
+            x->pred_mv[mbmi->ref_frame[0]] = x->best_mv.as_mv;
+
+#if CONFIG_EXT_INTER
+          mode_mv[this_mode][0] = x->best_mv;
+#else
+          mode_mv[NEWMV][0] = x->best_mv;
+#endif  // CONFIG_EXT_INTER
+
+          // restore src pointers
+          mi_buf_restore(x, orig_src, orig_pre);
+        }
+
+        if (has_second_rf) {
+#if CONFIG_EXT_INTER
+          if (seg_mvs[i][mv_idx][mbmi->ref_frame[1]].as_int == INVALID_MV ||
+              seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_int == INVALID_MV)
+#else
+          if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV ||
+              seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV)
+#endif  // CONFIG_EXT_INTER
+            continue;
+        }
+
+#if CONFIG_DUAL_FILTER
+        (void)run_mv_search;
+#endif
+
+        if (has_second_rf &&
+#if CONFIG_EXT_INTER
+            this_mode == NEW_NEWMV &&
+#else
+            this_mode == NEWMV &&
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_DUAL_FILTER
+            (mbmi->interp_filter[0] == EIGHTTAP_REGULAR || run_mv_search))
+#else
+            (mbmi->interp_filter == EIGHTTAP_REGULAR || run_mv_search))
+#endif
+        {
+          // adjust src pointers
+          mi_buf_shift(x, i);
+          if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
+            int rate_mv;
+            joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
+                                mi_col,
+#if CONFIG_EXT_INTER
+                                bsi->ref_mv, seg_mvs[i][mv_idx],
+#else
+                                seg_mvs[i],
+#endif  // CONFIG_EXT_INTER
+                                &rate_mv, i);
+#if CONFIG_EXT_INTER
+            compound_seg_newmvs[i][0].as_int =
+                frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
+            compound_seg_newmvs[i][1].as_int =
+                frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+#else
+            seg_mvs[i][mbmi->ref_frame[0]].as_int =
+                frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
+            seg_mvs[i][mbmi->ref_frame[1]].as_int =
+                frame_mv[this_mode][mbmi->ref_frame[1]].as_int;
+#endif  // CONFIG_EXT_INTER
+          }
+          // restore src pointers
+          mi_buf_restore(x, orig_src, orig_pre);
+        }
+
+        bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
+            cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv,
+#if CONFIG_EXT_INTER
+            seg_mvs[i][mv_idx], compound_seg_newmvs[i],
+#else
+            seg_mvs[i],
+#endif  // CONFIG_EXT_INTER
+            bsi->ref_mv, x->nmvjointcost, x->mvcost);
+
+        for (ref = 0; ref < 1 + has_second_rf; ++ref) {
+          bsi->rdstat[i][mode_idx].mvs[ref].as_int =
+              mode_mv[this_mode][ref].as_int;
+          if (num_4x4_blocks_wide > 1)
+            bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
+                mode_mv[this_mode][ref].as_int;
+          if (num_4x4_blocks_high > 1)
+            bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
+                mode_mv[this_mode][ref].as_int;
+#if CONFIG_REF_MV
+          bsi->rdstat[i][mode_idx].pred_mv[ref].as_int =
+              mi->bmi[i].pred_mv_s8[ref].as_int;
+          if (num_4x4_blocks_wide > 1)
+            bsi->rdstat[i + 1][mode_idx].pred_mv[ref].as_int =
+                mi->bmi[i].pred_mv_s8[ref].as_int;
+          if (num_4x4_blocks_high > 1)
+            bsi->rdstat[i + 2][mode_idx].pred_mv[ref].as_int =
+                mi->bmi[i].pred_mv_s8[ref].as_int;
+#endif
+#if CONFIG_EXT_INTER
+          bsi->rdstat[i][mode_idx].ref_mv[ref].as_int =
+              bsi->ref_mv[ref]->as_int;
+          if (num_4x4_blocks_wide > 1)
+            bsi->rdstat[i + 1][mode_idx].ref_mv[ref].as_int =
+                bsi->ref_mv[ref]->as_int;
+          if (num_4x4_blocks_high > 1)
+            bsi->rdstat[i + 2][mode_idx].ref_mv[ref].as_int =
+                bsi->ref_mv[ref]->as_int;
+#endif  // CONFIG_EXT_INTER
+        }
+
+        // Trap vectors that reach beyond the UMV borders
+        if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
+            (has_second_rf && mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
+          continue;
+
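+        // For filter passes after the first, reuse the rd stats computed
+        // with an earlier filter when the mvs are integer-pel and identical:
+        // the interpolation filter cannot change such a prediction.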
+        if (filter_idx > 0) {
+          BEST_SEG_INFO *ref_bsi = bsi_buf;
+          subpelmv = 0;
+          have_ref = 1;
+
+          for (ref = 0; ref < 1 + has_second_rf; ++ref) {
+            subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
+#if CONFIG_EXT_INTER
+            if (have_newmv_in_inter_mode(this_mode))
+              have_ref &= ((mode_mv[this_mode][ref].as_int ==
+                            ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int) &&
+                           (bsi->ref_mv[ref]->as_int ==
+                            ref_bsi->rdstat[i][mode_idx].ref_mv[ref].as_int));
+            else
+#endif  // CONFIG_EXT_INTER
+              have_ref &= mode_mv[this_mode][ref].as_int ==
+                          ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+          }
+
+          have_ref &= ref_bsi->rdstat[i][mode_idx].brate > 0;
+
+          if (filter_idx > 1 && !subpelmv && !have_ref) {
+            ref_bsi = bsi_buf + 1;
+            have_ref = 1;
+            for (ref = 0; ref < 1 + has_second_rf; ++ref)
+#if CONFIG_EXT_INTER
+              if (have_newmv_in_inter_mode(this_mode))
+                have_ref &= ((mode_mv[this_mode][ref].as_int ==
+                              ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int) &&
+                             (bsi->ref_mv[ref]->as_int ==
+                              ref_bsi->rdstat[i][mode_idx].ref_mv[ref].as_int));
+              else
+#endif  // CONFIG_EXT_INTER
+                have_ref &= mode_mv[this_mode][ref].as_int ==
+                            ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+
+            have_ref &= ref_bsi->rdstat[i][mode_idx].brate > 0;
+          }
+
+          if (!subpelmv && have_ref &&
+              ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
+#if CONFIG_REF_MV
+            bsi->rdstat[i][mode_idx].byrate =
+                ref_bsi->rdstat[i][mode_idx].byrate;
+            bsi->rdstat[i][mode_idx].bdist = ref_bsi->rdstat[i][mode_idx].bdist;
+            bsi->rdstat[i][mode_idx].bsse = ref_bsi->rdstat[i][mode_idx].bsse;
+            bsi->rdstat[i][mode_idx].brate +=
+                ref_bsi->rdstat[i][mode_idx].byrate;
+            bsi->rdstat[i][mode_idx].eobs = ref_bsi->rdstat[i][mode_idx].eobs;
+
+            bsi->rdstat[i][mode_idx].brdcost =
+                RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate,
+                       bsi->rdstat[i][mode_idx].bdist);
+
+            memcpy(bsi->rdstat[i][mode_idx].ta, ref_bsi->rdstat[i][mode_idx].ta,
+                   sizeof(bsi->rdstat[i][mode_idx].ta));
+            memcpy(bsi->rdstat[i][mode_idx].tl, ref_bsi->rdstat[i][mode_idx].tl,
+                   sizeof(bsi->rdstat[i][mode_idx].tl));
+#else
+            memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
+                   sizeof(SEG_RDSTAT));
+#endif
+            if (num_4x4_blocks_wide > 1)
+              bsi->rdstat[i + 1][mode_idx].eobs =
+                  ref_bsi->rdstat[i + 1][mode_idx].eobs;
+            if (num_4x4_blocks_high > 1)
+              bsi->rdstat[i + 2][mode_idx].eobs =
+                  ref_bsi->rdstat[i + 2][mode_idx].eobs;
+
+            if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
+#if CONFIG_REF_MV
+              // If the NEWMV mode is using the same motion vector as the
+              // NEARESTMV mode, skip the remaining rate-distortion
+              // calculations and use the inferred motion vector modes.
+              if (this_mode == NEWMV) {
+                if (has_second_rf) {
+                  if (bsi->rdstat[i][mode_idx].mvs[0].as_int ==
+                          bsi->ref_mv[0]->as_int &&
+                      bsi->rdstat[i][mode_idx].mvs[1].as_int ==
+                          bsi->ref_mv[1]->as_int)
+                    continue;
+                } else {
+                  if (bsi->rdstat[i][mode_idx].mvs[0].as_int ==
+                      bsi->ref_mv[0]->as_int)
+                    continue;
+                }
+              }
+#endif
+              mode_selected = this_mode;
+              best_rd = bsi->rdstat[i][mode_idx].brdcost;
+            }
+            continue;
+          }
+        }
+
+        bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
+            cpi, x, bsi->segment_rd - this_segment_rd, i,
+            &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
+            &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
+            bsi->rdstat[i][mode_idx].tl, idy, idx, mi_row, mi_col);
+
+        if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
+          bsi->rdstat[i][mode_idx].brdcost +=
+              RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
+          bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
+          bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
+          if (num_4x4_blocks_wide > 1)
+            bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
+          if (num_4x4_blocks_high > 1)
+            bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
+        }
+
+        if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
+#if CONFIG_REF_MV
+          // If the NEWMV mode is using the same motion vector as the
+          // NEARESTMV mode, skip the remaining rate-distortion
+          // calculations and use the inferred motion vector modes.
+          if (this_mode == NEWMV) {
+            if (has_second_rf) {
+              if (bsi->rdstat[i][mode_idx].mvs[0].as_int ==
+                      bsi->ref_mv[0]->as_int &&
+                  bsi->rdstat[i][mode_idx].mvs[1].as_int ==
+                      bsi->ref_mv[1]->as_int)
+                continue;
+            } else {
+              if (bsi->rdstat[i][mode_idx].mvs[0].as_int ==
+                  bsi->ref_mv[0]->as_int)
+                continue;
+            }
+          }
+#endif
+          mode_selected = this_mode;
+          best_rd = bsi->rdstat[i][mode_idx].brdcost;
+        }
+      } /*for each 4x4 mode*/
+
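+      // No mode produced a valid RD cost for this label: invalidate the
+      // stats of the remaining labels and abandon this segmentation.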
+      if (best_rd == INT64_MAX) {
+        int iy, midx;
+        for (iy = i + 1; iy < 4; ++iy)
+#if CONFIG_EXT_INTER
+          for (midx = 0; midx < INTER_MODES + INTER_COMPOUND_MODES; ++midx)
+#else
+          for (midx = 0; midx < INTER_MODES; ++midx)
+#endif  // CONFIG_EXT_INTER
+            bsi->rdstat[iy][midx].brdcost = INT64_MAX;
+        bsi->segment_rd = INT64_MAX;
+        return INT64_MAX;
+      }
+
+      mode_idx = INTER_OFFSET(mode_selected);
+      memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
+      memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
+
+#if CONFIG_EXT_INTER
+      mv_idx = (mode_selected == NEWFROMNEARMV) ? 1 : 0;
+      bsi->ref_mv[0]->as_int = bsi->rdstat[i][mode_idx].ref_mv[0].as_int;
+      if (has_second_rf)
+        bsi->ref_mv[1]->as_int = bsi->rdstat[i][mode_idx].ref_mv[1].as_int;
+#endif  // CONFIG_EXT_INTER
+      set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
+                           frame_mv,
+#if CONFIG_EXT_INTER
+                           seg_mvs[i][mv_idx], compound_seg_newmvs[i],
+#else
+                           seg_mvs[i],
+#endif  // CONFIG_EXT_INTER
+                           bsi->ref_mv, x->nmvjointcost, x->mvcost);
+
+      br += bsi->rdstat[i][mode_idx].brate;
+      bd += bsi->rdstat[i][mode_idx].bdist;
+      block_sse += bsi->rdstat[i][mode_idx].bsse;
+      segmentyrate += bsi->rdstat[i][mode_idx].byrate;
+      this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
+
+      if (this_segment_rd > bsi->segment_rd) {
+        int iy, midx;
+        for (iy = i + 1; iy < 4; ++iy)
+#if CONFIG_EXT_INTER
+          for (midx = 0; midx < INTER_MODES + INTER_COMPOUND_MODES; ++midx)
+#else
+          for (midx = 0; midx < INTER_MODES; ++midx)
+#endif  // CONFIG_EXT_INTER
+            bsi->rdstat[iy][midx].brdcost = INT64_MAX;
+        bsi->segment_rd = INT64_MAX;
+        return INT64_MAX;
+      }
+    }
+  } /* for each label */
+
+  bsi->r = br;
+  bsi->d = bd;
+  bsi->segment_yrate = segmentyrate;
+  bsi->segment_rd = this_segment_rd;
+  bsi->sse = block_sse;
+
+  // update the coding decisions
+  for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
+
+  if (bsi->segment_rd > best_rd) return INT64_MAX;
+  /* set it to the best */
+  for (idx = 0; idx < 4; idx++) {
+    mode_idx = INTER_OFFSET(bsi->modes[idx]);
+    mi->bmi[idx].as_mv[0].as_int = bsi->rdstat[idx][mode_idx].mvs[0].as_int;
+    if (has_second_ref(mbmi))
+      mi->bmi[idx].as_mv[1].as_int = bsi->rdstat[idx][mode_idx].mvs[1].as_int;
+#if CONFIG_REF_MV
+    mi->bmi[idx].pred_mv_s8[0] = bsi->rdstat[idx][mode_idx].pred_mv[0];
+    if (has_second_ref(mbmi))
+      mi->bmi[idx].pred_mv_s8[1] = bsi->rdstat[idx][mode_idx].pred_mv[1];
+#endif
+#if CONFIG_EXT_INTER
+    mi->bmi[idx].ref_mv[0].as_int = bsi->rdstat[idx][mode_idx].ref_mv[0].as_int;
+    if (has_second_rf)
+      mi->bmi[idx].ref_mv[1].as_int =
+          bsi->rdstat[idx][mode_idx].ref_mv[1].as_int;
+#endif  // CONFIG_EXT_INTER
+    x->plane[0].eobs[idx] = bsi->rdstat[idx][mode_idx].eobs;
+    mi->bmi[idx].as_mode = bsi->modes[idx];
+  }
+
+  /*
+   * Report the selected modes back to the caller; these values are
+   * used to set mbmi->mv.as_int.
+   */
+  *returntotrate = bsi->r;
+  *returndistortion = bsi->d;
+  *returnyrate = bsi->segment_yrate;
+  *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
+  *psse = bsi->sse;
+  mbmi->mode = bsi->modes[3];
+
+  return bsi->segment_rd;
+}
+
+static void estimate_ref_frame_costs(const VP10_COMMON *cm,
+                                     const MACROBLOCKD *xd, int segment_id,
+                                     unsigned int *ref_costs_single,
+                                     unsigned int *ref_costs_comp,
+                                     vpx_prob *comp_mode_p) {
+  int seg_ref_active =
+      segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
+  if (seg_ref_active) {
+    memset(ref_costs_single, 0,
+           TOTAL_REFS_PER_FRAME * sizeof(*ref_costs_single));
+    memset(ref_costs_comp, 0, TOTAL_REFS_PER_FRAME * sizeof(*ref_costs_comp));
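+    // A vpx_prob of 128 is probability 1/2 in the 8-bit coder domain, so
+    // the compound/single decision is costed as an even coin flip when
+    // segment-level reference selection is active.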
+    *comp_mode_p = 128;
+  } else {
+    vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
+    vpx_prob comp_inter_p = 128;
+
+    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+      comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
+      *comp_mode_p = comp_inter_p;
+    } else {
+      *comp_mode_p = 128;
+    }
+
+    ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
+
+    if (cm->reference_mode != COMPOUND_REFERENCE) {
+      vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
+      vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
+#if CONFIG_EXT_REFS
+      vpx_prob ref_single_p3 = vp10_get_pred_prob_single_ref_p3(cm, xd);
+      vpx_prob ref_single_p4 = vp10_get_pred_prob_single_ref_p4(cm, xd);
+      vpx_prob ref_single_p5 = vp10_get_pred_prob_single_ref_p5(cm, xd);
+#endif  // CONFIG_EXT_REFS
+
+      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+
+      ref_costs_single[LAST_FRAME] =
+#if CONFIG_EXT_REFS
+          ref_costs_single[LAST2_FRAME] = ref_costs_single[LAST3_FRAME] =
+              ref_costs_single[BWDREF_FRAME] =
+#endif  // CONFIG_EXT_REFS
+                  ref_costs_single[GOLDEN_FRAME] =
+                      ref_costs_single[ALTREF_FRAME] = base_cost;
+
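+      // Each reference is signalled as a path through a binary tree of
+      // single-ref probabilities; the vp10_cost_bit() sums below accumulate
+      // the cost of every bit on that path. E.g. with CONFIG_EXT_REFS,
+      // LAST_FRAME is reached via p1 = 0, p3 = 0, p4 = 0, so its total cost
+      // is base_cost plus those three bit costs.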
+#if CONFIG_EXT_REFS
+      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+
+      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p3, 0);
+      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p3, 0);
+      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p3, 1);
+      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p3, 1);
+
+      ref_costs_single[BWDREF_FRAME] += vp10_cost_bit(ref_single_p2, 0);
+      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+
+      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p4, 0);
+      ref_costs_single[LAST2_FRAME] += vp10_cost_bit(ref_single_p4, 1);
+
+      ref_costs_single[LAST3_FRAME] += vp10_cost_bit(ref_single_p5, 0);
+      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p5, 1);
+#else
+      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
+
+      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
+      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+#endif  // CONFIG_EXT_REFS
+    } else {
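+      // Single-ref costs are never signalled in pure compound mode; 512
+      // (the cost of one bit at probability 1/2 in this cost scale) appears
+      // to serve as a neutral placeholder.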
+      ref_costs_single[LAST_FRAME] = 512;
+#if CONFIG_EXT_REFS
+      ref_costs_single[LAST2_FRAME] = 512;
+      ref_costs_single[LAST3_FRAME] = 512;
+      ref_costs_single[BWDREF_FRAME] = 512;
+#endif  // CONFIG_EXT_REFS
+      ref_costs_single[GOLDEN_FRAME] = 512;
+      ref_costs_single[ALTREF_FRAME] = 512;
+    }
+
+    if (cm->reference_mode != SINGLE_REFERENCE) {
+      vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
+#if CONFIG_EXT_REFS
+      vpx_prob ref_comp_p1 = vp10_get_pred_prob_comp_ref_p1(cm, xd);
+      vpx_prob ref_comp_p2 = vp10_get_pred_prob_comp_ref_p2(cm, xd);
+      vpx_prob bwdref_comp_p = vp10_get_pred_prob_comp_bwdref_p(cm, xd);
+#endif  // CONFIG_EXT_REFS
+
+      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+
+      ref_costs_comp[LAST_FRAME] =
+#if CONFIG_EXT_REFS
+          ref_costs_comp[LAST2_FRAME] = ref_costs_comp[LAST3_FRAME] =
+#endif  // CONFIG_EXT_REFS
+              ref_costs_comp[GOLDEN_FRAME] = base_cost;
+
+#if CONFIG_EXT_REFS
+      ref_costs_comp[BWDREF_FRAME] = ref_costs_comp[ALTREF_FRAME] = 0;
+#endif  // CONFIG_EXT_REFS
+
+#if CONFIG_EXT_REFS
+      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+
+      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p1, 1);
+      ref_costs_comp[LAST2_FRAME] += vp10_cost_bit(ref_comp_p1, 0);
+
+      ref_costs_comp[LAST3_FRAME] += vp10_cost_bit(ref_comp_p2, 0);
+      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p2, 1);
+
+      // NOTE(zoeliu): BWDREF and ALTREF each add an extra cost by coding 1
+      //               more bit.
+      ref_costs_comp[BWDREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 0);
+      ref_costs_comp[ALTREF_FRAME] += vp10_cost_bit(bwdref_comp_p, 1);
+#else
+      ref_costs_comp[LAST_FRAME] += vp10_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[GOLDEN_FRAME] += vp10_cost_bit(ref_comp_p, 1);
+#endif  // CONFIG_EXT_REFS
+    } else {
+      ref_costs_comp[LAST_FRAME] = 512;
+#if CONFIG_EXT_REFS
+      ref_costs_comp[LAST2_FRAME] = 512;
+      ref_costs_comp[LAST3_FRAME] = 512;
+      ref_costs_comp[BWDREF_FRAME] = 512;
+      ref_costs_comp[ALTREF_FRAME] = 512;
+#endif  // CONFIG_EXT_REFS
+      ref_costs_comp[GOLDEN_FRAME] = 512;
+    }
+  }
+}
+
+static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
+                                 int mode_index,
+                                 int64_t comp_pred_diff[REFERENCE_MODES],
+                                 int skippable) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+
+  // Take a snapshot of the coding context so it can be
+  // restored if we decide to encode this way
+  ctx->skip = x->skip;
+  ctx->skippable = skippable;
+  ctx->best_mode_index = mode_index;
+  ctx->mic = *xd->mi[0];
+  ctx->mbmi_ext = *x->mbmi_ext;
+  ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
+  ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
+  ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
+}
+
+static void setup_buffer_inter(
+    VP10_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame,
+    BLOCK_SIZE block_size, int mi_row, int mi_col,
+    int_mv frame_nearest_mv[TOTAL_REFS_PER_FRAME],
+    int_mv frame_near_mv[TOTAL_REFS_PER_FRAME],
+    struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE]) {
+  const VP10_COMMON *cm = &cpi->common;
+  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MODE_INFO *const mi = xd->mi[0];
+  int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
+  const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+
+  assert(yv12 != NULL);
+
+  // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
+  // use the UV scaling factors.
+  vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+
+  // Gets an initial list of candidate vectors from neighbours and orders them
+  vp10_find_mv_refs(
+      cm, xd, mi, ref_frame,
+#if CONFIG_REF_MV
+      &mbmi_ext->ref_mv_count[ref_frame], mbmi_ext->ref_mv_stack[ref_frame],
+#if CONFIG_EXT_INTER
+      mbmi_ext->compound_mode_context,
+#endif  // CONFIG_EXT_INTER
+#endif
+      candidates, mi_row, mi_col, NULL, NULL, mbmi_ext->mode_context);
+
+  // Candidate refinement carried out at encoder and decoder
+  vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
+                         &frame_nearest_mv[ref_frame],
+                         &frame_near_mv[ref_frame]);
+
+  // Further refinement that is encode side only to test the top few candidates
+  // in full and choose the best as the centre point for subsequent searches.
+  // The current implementation doesn't support scaling.
+  if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
+    vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+                 block_size);
+}
+
+static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
+#if CONFIG_EXT_INTER
+                                 int ref_idx, int mv_idx,
+#endif  // CONFIG_EXT_INTER
+                                 int *rate_mv) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  const VP10_COMMON *cm = &cpi->common;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
+  int bestsme = INT_MAX;
+  int step_param;
+  int sadpb = x->sadperbit16;
+  MV mvp_full;
+#if CONFIG_EXT_INTER
+  int ref = mbmi->ref_frame[ref_idx];
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][mv_idx].as_mv;
+#else
+  int ref = mbmi->ref_frame[0];
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+  int ref_idx = 0;
+#endif  // CONFIG_EXT_INTER
+
+  int tmp_col_min = x->mv_col_min;
+  int tmp_col_max = x->mv_col_max;
+  int tmp_row_min = x->mv_row_min;
+  int tmp_row_max = x->mv_row_max;
+  int cost_list[5];
+
+  const YV12_BUFFER_CONFIG *scaled_ref_frame =
+      vp10_get_scaled_ref_frame(cpi, ref);
+
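+  // Three candidate starting points for the full-pel search: the two best
+  // reference MVs for this frame and, when adaptive motion search is on,
+  // the predictor carried over from earlier searches of this reference.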
+  MV pred_mv[3];
+  pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+  pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
+  pred_mv[2] = x->pred_mv[ref];
+
+#if CONFIG_REF_MV
+  vp10_set_mvcost(x, ref);
+#endif
+
+  if (scaled_ref_frame) {
+    int i;
+    // Swap out the reference frame for a version that's been scaled to
+    // match the resolution of the current frame, allowing the existing
+    // motion search code to be used without additional modifications.
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      backup_yv12[i] = xd->plane[i].pre[ref_idx];
+
+    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+  }
+
+  // Work out the size of the first step in the mv step search.
+  // 0 here is the maximum-length first step; 1 is MAX >> 1, etc.
+  if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
+    // Take a weighted average of the step_params based on the last frame's
+    // max mv magnitude and on the best ref mvs of the current block for
+    // the given reference.
+    step_param =
+        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        2;
+  } else {
+    step_param = cpi->mv_step_param;
+  }
+
+  if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
+    int boffset =
+        2 * (b_width_log2_lookup[cm->sb_size] -
+             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = VPXMAX(step_param, boffset);
+  }
+
+  if (cpi->sf.adaptive_motion_search) {
+    int bwl = b_width_log2_lookup[bsize];
+    int bhl = b_height_log2_lookup[bsize];
+    int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
+
+    if (tlevel < 5) step_param += 2;
+
+    // pred_mv_sad is not set up for dynamically scaled frames.
+    if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
+      int i;
+      for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
+        if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
+          x->pred_mv[ref].row = 0;
+          x->pred_mv[ref].col = 0;
+          x->best_mv.as_int = INVALID_MV;
+
+          if (scaled_ref_frame) {
+            int i;
+            for (i = 0; i < MAX_MB_PLANE; ++i)
+              xd->plane[i].pre[ref_idx] = backup_yv12[i];
+          }
+          return;
+        }
+      }
+    }
+  }
+
+  vp10_set_mv_search_range(x, &ref_mv);
+
+  mvp_full = pred_mv[x->mv_best_ref_index[ref]];
+
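+  // MVs are stored in 1/8-pel units; drop the fractional bits to obtain a
+  // full-pel starting point for the integer search.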
+  mvp_full.col >>= 3;
+  mvp_full.row >>= 3;
+
+  x->best_mv.as_int = x->second_best_mv.as_int = INVALID_MV;
+
+  bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+                                   cond_cost_list(cpi, cost_list), &ref_mv,
+                                   INT_MAX, 1);
+
+  x->mv_col_min = tmp_col_min;
+  x->mv_col_max = tmp_col_max;
+  x->mv_row_min = tmp_row_min;
+  x->mv_row_max = tmp_row_max;
+
+  if (bestsme < INT_MAX) {
+    int dis; /* TODO: use dis in distortion calculation later. */
+    if (cpi->sf.use_upsampled_references) {
+      int best_mv_var;
+      const int try_second = x->second_best_mv.as_int != INVALID_MV &&
+                             x->second_best_mv.as_int != x->best_mv.as_int;
+      const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
+      const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
+      // Use up-sampled reference frames.
+      struct macroblockd_plane *const pd = &xd->plane[0];
+      struct buf_2d backup_pred = pd->pre[ref_idx];
+      const YV12_BUFFER_CONFIG *upsampled_ref = get_upsampled_ref(cpi, ref);
+
+      // Set pred for Y plane
+      setup_pred_plane(&pd->pre[ref_idx], upsampled_ref->y_buffer,
+                       upsampled_ref->y_crop_width,
+                       upsampled_ref->y_crop_height, upsampled_ref->y_stride,
+                       (mi_row << 3), (mi_col << 3), NULL, pd->subsampling_x,
+                       pd->subsampling_y);
+
+      best_mv_var = cpi->find_fractional_mv_step(
+          x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+          &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+          cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+          x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph, 1);
+
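+      // Optionally re-run the subpel refinement seeded from the second-best
+      // full-pel candidate and keep whichever result has the lower error.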
+      if (try_second) {
+        const int minc = VPXMAX(x->mv_col_min * 8, ref_mv.col - MV_MAX);
+        const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv.col + MV_MAX);
+        const int minr = VPXMAX(x->mv_row_min * 8, ref_mv.row - MV_MAX);
+        const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv.row + MV_MAX);
+        int this_var;
+        MV best_mv = x->best_mv.as_mv;
+
+        x->best_mv = x->second_best_mv;
+        if (x->best_mv.as_mv.row * 8 <= maxr &&
+            x->best_mv.as_mv.row * 8 >= minr &&
+            x->best_mv.as_mv.col * 8 <= maxc &&
+            x->best_mv.as_mv.col * 8 >= minc) {
+          this_var = cpi->find_fractional_mv_step(
+              x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+              &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+              cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+              x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph,
+              1);
+          if (this_var < best_mv_var) best_mv = x->best_mv.as_mv;
+          x->best_mv.as_mv = best_mv;
+        }
+      }
+
+      // Restore the reference frames.
+      pd->pre[ref_idx] = backup_pred;
+    } else {
+      cpi->find_fractional_mv_step(
+          x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+          &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+          cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+          x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0, 0);
+    }
+  }
+  *rate_mv = vp10_mv_bit_cost(&x->best_mv.as_mv, &ref_mv, x->nmvjointcost,
+                              x->mvcost, MV_COST_WEIGHT);
+
+  if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = x->best_mv.as_mv;
+
+  if (scaled_ref_frame) {
+    int i;
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      xd->plane[i].pre[ref_idx] = backup_yv12[i];
+  }
+}
+
+static INLINE void restore_dst_buf(MACROBLOCKD *xd,
+                                   uint8_t *orig_dst[MAX_MB_PLANE],
+                                   int orig_dst_stride[MAX_MB_PLANE]) {
+  int i;
+  for (i = 0; i < MAX_MB_PLANE; i++) {
+    xd->plane[i].dst.buf = orig_dst[i];
+    xd->plane[i].dst.stride = orig_dst_stride[i];
+  }
+}
+
+#if CONFIG_OBMC
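+// Motion search for the OBMC (overlapped block motion compensation) case.
+// Mirrors single_motion_search() above, but the error metric is measured
+// against the overlap-weighted source (wsrc) under the blending mask.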
+static void single_motion_search_obmc(VP10_COMP *cpi, MACROBLOCK *x,
+                                      BLOCK_SIZE bsize, int mi_row, int mi_col,
+                                      const int32_t *wsrc, const int32_t *mask,
+#if CONFIG_EXT_INTER
+                                      int ref_idx, int mv_idx,
+#endif  // CONFIG_EXT_INTER
+                                      int_mv *tmp_mv, int_mv pred_mv,
+                                      int *rate_mv) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  const VP10_COMMON *cm = &cpi->common;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
+  int bestsme = INT_MAX;
+  int step_param;
+  int sadpb = x->sadperbit16;
+  MV mvp_full;
+#if CONFIG_EXT_INTER
+  int ref = mbmi->ref_frame[ref_idx];
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][mv_idx].as_mv;
+#else
+  int ref = mbmi->ref_frame[0];
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+  int ref_idx = 0;
+#endif  // CONFIG_EXT_INTER
+
+  int tmp_col_min = x->mv_col_min;
+  int tmp_col_max = x->mv_col_max;
+  int tmp_row_min = x->mv_row_min;
+  int tmp_row_max = x->mv_row_max;
+
+  const YV12_BUFFER_CONFIG *scaled_ref_frame =
+      vp10_get_scaled_ref_frame(cpi, ref);
+
+#if CONFIG_REF_MV
+  vp10_set_mvcost(x, ref);
+#endif
+
+  if (scaled_ref_frame) {
+    int i;
+    // Swap out the reference frame for a version that's been scaled to
+    // match the resolution of the current frame, allowing the existing
+    // motion search code to be used without additional modifications.
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      backup_yv12[i] = xd->plane[i].pre[ref_idx];
+
+    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+  }
+
+  // Work out the size of the first step in the mv step search.
+  // 0 here is the maximum-length first step; 1 is MAX >> 1, etc.
+  if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
+    // Take a weighted average of the step_params based on the last frame's
+    // max mv magnitude and on the best ref mvs of the current block for
+    // the given reference.
+    step_param =
+        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        2;
+  } else {
+    step_param = cpi->mv_step_param;
+  }
+
+  if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size) {
+    int boffset =
+        2 * (b_width_log2_lookup[cm->sb_size] -
+             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = VPXMAX(step_param, boffset);
+  }
+
+  if (cpi->sf.adaptive_motion_search) {
+    int bwl = b_width_log2_lookup[bsize];
+    int bhl = b_height_log2_lookup[bsize];
+    int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
+
+    if (tlevel < 5) step_param += 2;
+
+    // pred_mv_sad is not set up for dynamically scaled frames.
+    if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
+      int i;
+      for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
+        if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
+          x->pred_mv[ref].row = 0;
+          x->pred_mv[ref].col = 0;
+          tmp_mv->as_int = INVALID_MV;
+
+          if (scaled_ref_frame) {
+            int i;
+            for (i = 0; i < MAX_MB_PLANE; ++i)
+              xd->plane[i].pre[ref_idx] = backup_yv12[i];
+          }
+          return;
+        }
+      }
+    }
+  }
+
+  vp10_set_mv_search_range(x, &ref_mv);
+
+  mvp_full = pred_mv.as_mv;
+  mvp_full.col >>= 3;
+  mvp_full.row >>= 3;
+
+  bestsme = vp10_obmc_full_pixel_diamond(
+      cpi, x, wsrc, mask, &mvp_full, step_param, sadpb,
+      MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
+      &tmp_mv->as_mv, ref_idx);
+
+  x->mv_col_min = tmp_col_min;
+  x->mv_col_max = tmp_col_max;
+  x->mv_row_min = tmp_row_min;
+  x->mv_row_max = tmp_row_max;
+
+  if (bestsme < INT_MAX) {
+    int dis;
+    vp10_find_best_obmc_sub_pixel_tree_up(
+        cpi, x, wsrc, mask, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
+        cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
+        cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
+        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
+        cpi->sf.use_upsampled_references);
+  }
+  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+                              x->mvcost, MV_COST_WEIGHT);
+
+  if (scaled_ref_frame) {
+    int i;
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      xd->plane[i].pre[ref_idx] = backup_yv12[i];
+  }
+}
+#endif  // CONFIG_OBMC
+
+#if CONFIG_EXT_INTER
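+// Motion search for one side of a wedge-partitioned compound prediction;
+// only pixels selected by the (soft) mask contribute to the error metric.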
+static void do_masked_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+                                    const uint8_t *mask, int mask_stride,
+                                    BLOCK_SIZE bsize, int mi_row, int mi_col,
+                                    int_mv *tmp_mv, int *rate_mv, int ref_idx,
+                                    int mv_idx) {
+  MACROBLOCKD *xd = &x->e_mbd;
+  const VP10_COMMON *cm = &cpi->common;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0, 0, 0, 0 } };
+  int bestsme = INT_MAX;
+  int step_param;
+  int sadpb = x->sadperbit16;
+  MV mvp_full;
+  int ref = mbmi->ref_frame[ref_idx];
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][mv_idx].as_mv;
+
+  int tmp_col_min = x->mv_col_min;
+  int tmp_col_max = x->mv_col_max;
+  int tmp_row_min = x->mv_row_min;
+  int tmp_row_max = x->mv_row_max;
+
+  const YV12_BUFFER_CONFIG *scaled_ref_frame =
+      vp10_get_scaled_ref_frame(cpi, ref);
+
+  MV pred_mv[3];
+  pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+  pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
+  pred_mv[2] = x->pred_mv[ref];
+
+#if CONFIG_REF_MV
+  vp10_set_mvcost(x, ref);
+#endif
+
+  if (scaled_ref_frame) {
+    int i;
+    // Swap out the reference frame for a version that's been scaled to
+    // match the resolution of the current frame, allowing the existing
+    // motion search code to be used without additional modifications.
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      backup_yv12[i] = xd->plane[i].pre[ref_idx];
+
+    vp10_setup_pre_planes(xd, ref_idx, scaled_ref_frame, mi_row, mi_col, NULL);
+  }
+
+  vp10_set_mv_search_range(x, &ref_mv);
+
+  // Work out the size of the first step in the mv step search.
+  // 0 here is the maximum-length first step; 1 is MAX >> 1, etc.
+  if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
+    // Take a weighted average of the step_params based on the last frame's
+    // max mv magnitude and on the best ref mvs of the current block for
+    // the given reference.
+    step_param =
+        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        2;
+  } else {
+    step_param = cpi->mv_step_param;
+  }
+
+  // TODO(debargha): is show_frame needed here?
+  if (cpi->sf.adaptive_motion_search && bsize < cm->sb_size && cm->show_frame) {
+    int boffset =
+        2 * (b_width_log2_lookup[cm->sb_size] -
+             VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
+    step_param = VPXMAX(step_param, boffset);
+  }
+
+  if (cpi->sf.adaptive_motion_search) {
+    int bwl = b_width_log2_lookup[bsize];
+    int bhl = b_height_log2_lookup[bsize];
+    int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
+
+    if (tlevel < 5) step_param += 2;
+
+    // pred_mv_sad is not set up for dynamically scaled frames.
+    if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
+      int i;
+      for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
+        if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
+          x->pred_mv[ref].row = 0;
+          x->pred_mv[ref].col = 0;
+          tmp_mv->as_int = INVALID_MV;
+
+          if (scaled_ref_frame) {
+            int i;
+            for (i = 0; i < MAX_MB_PLANE; ++i)
+              xd->plane[i].pre[ref_idx] = backup_yv12[i];
+          }
+          return;
+        }
+      }
+    }
+  }
+
+  mvp_full = pred_mv[x->mv_best_ref_index[ref]];
+
+  mvp_full.col >>= 3;
+  mvp_full.row >>= 3;
+
+  bestsme = vp10_masked_full_pixel_diamond(
+      cpi, x, mask, mask_stride, &mvp_full, step_param, sadpb,
+      MAX_MVSEARCH_STEPS - 1 - step_param, 1, &cpi->fn_ptr[bsize], &ref_mv,
+      &tmp_mv->as_mv, ref_idx);
+
+  x->mv_col_min = tmp_col_min;
+  x->mv_col_max = tmp_col_max;
+  x->mv_row_min = tmp_row_min;
+  x->mv_row_max = tmp_row_max;
+
+  if (bestsme < INT_MAX) {
+    int dis; /* TODO: use dis in distortion calculation later. */
+    vp10_find_best_masked_sub_pixel_tree_up(
+        cpi, x, mask, mask_stride, mi_row, mi_col, &tmp_mv->as_mv, &ref_mv,
+        cm->allow_high_precision_mv, x->errorperbit, &cpi->fn_ptr[bsize],
+        cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
+        x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], ref_idx,
+        cpi->sf.use_upsampled_references);
+  }
+  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+                              x->mvcost, MV_COST_WEIGHT);
+
+  if (cpi->sf.adaptive_motion_search && cm->show_frame)
+    x->pred_mv[ref] = tmp_mv->as_mv;
+
+  if (scaled_ref_frame) {
+    int i;
+    for (i = 0; i < MAX_MB_PLANE; i++)
+      xd->plane[i].pre[ref_idx] = backup_yv12[i];
+  }
+}
+
+static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
+                                            int wedge_index, int wedge_sign,
+                                            BLOCK_SIZE bsize, int mi_row,
+                                            int mi_col, int_mv *tmp_mv,
+                                            int *rate_mv, int mv_idx[2],
+                                            int which) {
+  // NOTE: 'which' selects the side(s) to search: 0 = side 0 only,
+  //       1 = side 1 only, 2 = both.
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  BLOCK_SIZE sb_type = mbmi->sb_type;
+  const uint8_t *mask;
+  const int mask_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
+  mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
+
+  if (which == 0 || which == 2)
+    do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
+                            &tmp_mv[0], &rate_mv[0], 0, mv_idx[0]);
+
+  if (which == 1 || which == 2) {
+    // get the negative mask
+    mask = vp10_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
+    do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
+                            &tmp_mv[1], &rate_mv[1], 1, mv_idx[1]);
+  }
+}
+#endif  // CONFIG_EXT_INTER
+
+// In some situations we want to discount the apparent cost of a new motion
+// vector. Where the motion field is subtle and spatial complexity is low,
+// it can be hard to justify the cost of a new motion vector in a single
+// block, even if that motion vector reduces distortion. However, once
+// established, that vector may be usable through the NEARESTMV and NEARMV
+// modes to reduce distortion in subsequent blocks and also improve visual
+// quality.
+static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
+                               int_mv this_mv,
+                               int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME],
+                               int ref_frame) {
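+  // Only discount when the NEARESTMV/NEARMV candidates are zero or invalid,
+  // i.e. when no usable motion predictor has been established yet.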
+  return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
+          (this_mv.as_int != 0) &&
+          ((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
+           (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
+          ((mode_mv[NEARMV][ref_frame].as_int == 0) ||
+           (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
+}
+
+#define LEFT_TOP_MARGIN ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
+#define RIGHT_BOTTOM_MARGIN \
+  ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
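+// The margins are in 1/8-pel units (hence the << 3); they let motion
+// vectors point into the extended border region around the frame.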
+
+// TODO(jingning): this mv clamping function should be block size dependent.
+static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
+  clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,
+           xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
+           xd->mb_to_top_edge - LEFT_TOP_MARGIN,
+           xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
+}
+
+#if CONFIG_EXT_INTER
+static int estimate_wedge_sign(const VP10_COMP *cpi, const MACROBLOCK *x,
+                               const BLOCK_SIZE bsize, const uint8_t *pred0,
+                               int stride0, const uint8_t *pred1, int stride1) {
+  const struct macroblock_plane *const p = &x->plane[0];
+  const uint8_t *src = p->src.buf;
+  int src_stride = p->src.stride;
+  const int f_index = bsize - BLOCK_8X8;
+  const int bw = 4 << (b_width_log2_lookup[bsize]);
+  const int bh = 4 << (b_height_log2_lookup[bsize]);
+  uint32_t esq[2][4], var;
+  int64_t tl, br;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    pred0 = CONVERT_TO_BYTEPTR(pred0);
+    pred1 = CONVERT_TO_BYTEPTR(pred1);
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  var = cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]);
+  var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2,
+                                stride0, &esq[0][1]);
+  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
+                                pred0 + bh / 2 * stride0, stride0, &esq[0][2]);
+  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
+                                pred0 + bh / 2 * stride0 + bw / 2, stride0,
+                                &esq[0][3]);
+  var = cpi->fn_ptr[f_index].vf(src, src_stride, pred1, stride1, &esq[1][0]);
+  var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred1 + bw / 2,
+                                stride1, &esq[1][1]);
+  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
+                                pred1 + bh / 2 * stride1, stride1, &esq[1][2]);
+  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
+                                pred1 + bh / 2 * stride1 + bw / 2, stride1,
+                                &esq[1][3]);
+  (void)var;
+
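+  // Heuristic: decide which predictor better matches the top-left versus
+  // the bottom-right of the block, and pick the wedge sign accordingly.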
+  tl = (int64_t)(esq[0][0] + esq[0][1] + esq[0][2]) -
+       (int64_t)(esq[1][0] + esq[1][1] + esq[1][2]);
+  br = (int64_t)(esq[1][3] + esq[1][1] + esq[1][2]) -
+       (int64_t)(esq[0][3] + esq[0][1] + esq[0][2]);
+  return (tl + br > 0);
+}
+#endif  // CONFIG_EXT_INTER
+
+#if !CONFIG_DUAL_FILTER
+static INTERP_FILTER predict_interp_filter(
+    const VP10_COMP *cpi, const MACROBLOCK *x, const BLOCK_SIZE bsize,
+    const int mi_row, const int mi_col,
+    INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME]) {
+  INTERP_FILTER best_filter = SWITCHABLE;
+  const VP10_COMMON *cm = &cpi->common;
+  const MACROBLOCKD *xd = &x->e_mbd;
+  int bsl = mi_width_log2_lookup[bsize];
+  int pred_filter_search =
+      cpi->sf.cb_pred_filter_search
+          ? (((mi_row + mi_col) >> bsl) +
+             get_chessboard_index(cm->current_video_frame)) &
+                0x1
+          : 0;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  const int is_comp_pred = has_second_ref(mbmi);
+  const int this_mode = mbmi->mode;
+  int refs[2] = { mbmi->ref_frame[0],
+                  (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+#if CONFIG_DUAL_FILTER
+  (void)pred_filter_search;
+  return SWITCHABLE;
+#else
+  if (pred_filter_search) {
+    INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
+    if (xd->up_available) af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
+    if (xd->left_available) lf = xd->mi[-1]->mbmi.interp_filter;
+
+#if CONFIG_EXT_INTER
+    if ((this_mode != NEWMV && this_mode != NEWFROMNEARMV &&
+         this_mode != NEW_NEWMV) ||
+        (af == lf))
+#else
+    if ((this_mode != NEWMV) || (af == lf))
+#endif  // CONFIG_EXT_INTER
+      best_filter = af;
+  }
+#endif
+  if (is_comp_pred) {
+    if (cpi->sf.adaptive_mode_search) {
+#if CONFIG_EXT_INTER
+      switch (this_mode) {
+        case NEAREST_NEARESTMV:
+          if (single_filter[NEARESTMV][refs[0]] ==
+              single_filter[NEARESTMV][refs[1]])
+            best_filter = single_filter[NEARESTMV][refs[0]];
+          break;
+        case NEAREST_NEARMV:
+          if (single_filter[NEARESTMV][refs[0]] ==
+              single_filter[NEARMV][refs[1]])
+            best_filter = single_filter[NEARESTMV][refs[0]];
+          break;
+        case NEAR_NEARESTMV:
+          if (single_filter[NEARMV][refs[0]] ==
+              single_filter[NEARESTMV][refs[1]])
+            best_filter = single_filter[NEARMV][refs[0]];
+          break;
+        case NEAR_NEARMV:
+          if (single_filter[NEARMV][refs[0]] == single_filter[NEARMV][refs[1]])
+            best_filter = single_filter[NEARMV][refs[0]];
+          break;
+        case ZERO_ZEROMV:
+          if (single_filter[ZEROMV][refs[0]] == single_filter[ZEROMV][refs[1]])
+            best_filter = single_filter[ZEROMV][refs[0]];
+          break;
+        case NEW_NEWMV:
+          if (single_filter[NEWMV][refs[0]] == single_filter[NEWMV][refs[1]])
+            best_filter = single_filter[NEWMV][refs[0]];
+          break;
+        case NEAREST_NEWMV:
+          if (single_filter[NEARESTMV][refs[0]] ==
+              single_filter[NEWMV][refs[1]])
+            best_filter = single_filter[NEARESTMV][refs[0]];
+          break;
+        case NEAR_NEWMV:
+          if (single_filter[NEARMV][refs[0]] == single_filter[NEWMV][refs[1]])
+            best_filter = single_filter[NEARMV][refs[0]];
+          break;
+        case NEW_NEARESTMV:
+          if (single_filter[NEWMV][refs[0]] ==
+              single_filter[NEARESTMV][refs[1]])
+            best_filter = single_filter[NEWMV][refs[0]];
+          break;
+        case NEW_NEARMV:
+          if (single_filter[NEWMV][refs[0]] == single_filter[NEARMV][refs[1]])
+            best_filter = single_filter[NEWMV][refs[0]];
+          break;
+        default:
+          if (single_filter[this_mode][refs[0]] ==
+              single_filter[this_mode][refs[1]])
+            best_filter = single_filter[this_mode][refs[0]];
+          break;
+      }
+#else
+      if (single_filter[this_mode][refs[0]] ==
+          single_filter[this_mode][refs[1]])
+        best_filter = single_filter[this_mode][refs[0]];
+#endif  // CONFIG_EXT_INTER
+    }
+  }
+  if (cm->interp_filter != BILINEAR) {
+    if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
+      best_filter = EIGHTTAP_REGULAR;
+    }
+#if CONFIG_EXT_INTERP
+    else if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+      best_filter = EIGHTTAP_REGULAR;
+    }
+#endif
+  }
+  return best_filter;
+}
+#endif
+
+#if CONFIG_EXT_INTER
+// Choose the best wedge index and sign
+static int64_t pick_wedge(const VP10_COMP *const cpi, const MACROBLOCK *const x,
+                          const BLOCK_SIZE bsize, const uint8_t *const p0,
+                          const uint8_t *const p1, int *const best_wedge_sign,
+                          int *const best_wedge_index) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const src = &x->plane[0].src;
+  const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
+  const int bh = 4 * num_4x4_blocks_high_lookup[bsize];
+  const int N = bw * bh;
+  int rate;
+  int64_t dist;
+  int64_t rd, best_rd = INT64_MAX;
+  int wedge_index;
+  int wedge_sign;
+  int wedge_types = (1 << get_wedge_bits_lookup(bsize));
+  const uint8_t *mask;
+  uint64_t sse;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+  const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
+#else
+  const int bd_round = 0;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
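+  // r0/r1: source minus each single prediction; d10: p1 minus p0 (the
+  // blend direction); ds: per-pixel difference of squared residuals used
+  // to pick the wedge sign.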
+  DECLARE_ALIGNED(32, int16_t, r0[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(32, int16_t, ds[MAX_SB_SQUARE]);
+
+  int64_t sign_limit;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (hbd) {
+    vpx_highbd_subtract_block(bh, bw, r0, bw, src->buf, src->stride,
+                              CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
+    vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+                              CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
+    vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+                              CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
+  } else  // NOLINT
+#endif    // CONFIG_VP9_HIGHBITDEPTH
+  {
+    vpx_subtract_block(bh, bw, r0, bw, src->buf, src->stride, p0, bw);
+    vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+    vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+  }
+
+  sign_limit = ((int64_t)vpx_sum_squares_i16(r0, N) -
+                (int64_t)vpx_sum_squares_i16(r1, N)) *
+               (1 << WEDGE_WEIGHT_BITS) / 2;
+
+  vp10_wedge_compute_delta_squares(ds, r0, r1, N);
+
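+  // For each candidate wedge: pick the sign from ds, compute the SSE of
+  // the masked blend from the r1/d10 residuals, then convert that SSE to
+  // an RD cost (roughly lambda * rate + distortion) via model_rd_from_sse()
+  // and RDCOST().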
+  for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+    mask = vp10_get_contiguous_soft_mask(wedge_index, 0, bsize);
+    wedge_sign = vp10_wedge_sign_from_residuals(ds, mask, N, sign_limit);
+
+    mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+    sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+    sse = ROUND_POWER_OF_TWO(sse, bd_round);
+
+    model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
+    rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
+
+    if (rd < best_rd) {
+      *best_wedge_index = wedge_index;
+      *best_wedge_sign = wedge_sign;
+      best_rd = rd;
+    }
+  }
+
+  return best_rd;
+}
+
+// Choose the best wedge index for the specified sign.
+static int64_t pick_wedge_fixed_sign(
+    const VP10_COMP *const cpi, const MACROBLOCK *const x,
+    const BLOCK_SIZE bsize, const uint8_t *const p0, const uint8_t *const p1,
+    const int wedge_sign, int *const best_wedge_index) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  const struct buf_2d *const src = &x->plane[0].src;
+  const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
+  const int bh = 4 * num_4x4_blocks_high_lookup[bsize];
+  const int N = bw * bh;
+  int rate;
+  int64_t dist;
+  int64_t rd, best_rd = INT64_MAX;
+  int wedge_index;
+  int wedge_types = (1 << get_wedge_bits_lookup(bsize));
+  const uint8_t *mask;
+  uint64_t sse;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int hbd = xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH;
+  const int bd_round = hbd ? (xd->bd - 8) * 2 : 0;
+#else
+  const int bd_round = 0;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (hbd) {
+    vpx_highbd_subtract_block(bh, bw, r1, bw, src->buf, src->stride,
+                              CONVERT_TO_BYTEPTR(p1), bw, xd->bd);
+    vpx_highbd_subtract_block(bh, bw, d10, bw, CONVERT_TO_BYTEPTR(p1), bw,
+                              CONVERT_TO_BYTEPTR(p0), bw, xd->bd);
+  } else  // NOLINT
+#endif    // CONFIG_VP9_HIGHBITDEPTH
+  {
+    vpx_subtract_block(bh, bw, r1, bw, src->buf, src->stride, p1, bw);
+    vpx_subtract_block(bh, bw, d10, bw, p1, bw, p0, bw);
+  }
+
+  for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+    mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
+    sse = vp10_wedge_sse_from_residuals(r1, d10, mask, N);
+    sse = ROUND_POWER_OF_TWO(sse, bd_round);
+
+    model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
+    rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
+
+    if (rd < best_rd) {
+      *best_wedge_index = wedge_index;
+      best_rd = rd;
+    }
+  }
+
+  return best_rd;
+}
+
+static int64_t pick_interinter_wedge(const VP10_COMP *const cpi,
+                                     const MACROBLOCK *const x,
+                                     const BLOCK_SIZE bsize,
+                                     const uint8_t *const p0,
+                                     const uint8_t *const p1) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
+
+  int64_t rd;
+  int wedge_index = -1;
+  int wedge_sign = 0;
+
+  assert(is_interinter_wedge_used(bsize));
+
+  if (cpi->sf.fast_wedge_sign_estimate) {
+    wedge_sign = estimate_wedge_sign(cpi, x, bsize, p0, bw, p1, bw);
+    rd = pick_wedge_fixed_sign(cpi, x, bsize, p0, p1, wedge_sign, &wedge_index);
+  } else {
+    rd = pick_wedge(cpi, x, bsize, p0, p1, &wedge_sign, &wedge_index);
+  }
+
+  mbmi->interinter_wedge_sign = wedge_sign;
+  mbmi->interinter_wedge_index = wedge_index;
+  return rd;
+}
+
+static int64_t pick_interintra_wedge(const VP10_COMP *const cpi,
+                                     const MACROBLOCK *const x,
+                                     const BLOCK_SIZE bsize,
+                                     const uint8_t *const p0,
+                                     const uint8_t *const p1) {
+  const MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+
+  int64_t rd;
+  int wedge_index = -1;
+
+  assert(is_interintra_wedge_used(bsize));
+
+  rd = pick_wedge_fixed_sign(cpi, x, bsize, p0, p1, 0, &wedge_index);
+
+  mbmi->interintra_wedge_sign = 0;
+  mbmi->interintra_wedge_index = wedge_index;
+  return rd;
+}
+#endif  // CONFIG_EXT_INTER
+
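+// Rate-distortion evaluation of a single inter mode for the current block:
+// runs any motion search the mode requires, validates and clamps the
+// resulting MVs, and accounts for the mode/MV signalling cost before the
+// prediction and transform stages.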
+static int64_t handle_inter_mode(
+    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+    int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
+    int *disable_skip, int_mv (*mode_mv)[TOTAL_REFS_PER_FRAME], int mi_row,
+    int mi_col,
+#if CONFIG_OBMC
+    uint8_t *dst_buf1[3], int dst_stride1[3], uint8_t *dst_buf2[3],
+    int dst_stride2[3], const int32_t *const wsrc, const int32_t *const mask2d,
+#endif  // CONFIG_OBMC
+#if CONFIG_EXT_INTER
+    int_mv single_newmvs[2][TOTAL_REFS_PER_FRAME],
+    int single_newmvs_rate[2][TOTAL_REFS_PER_FRAME],
+    int *compmode_interintra_cost, int *compmode_wedge_cost,
+    int64_t (*const modelled_rd)[TOTAL_REFS_PER_FRAME],
+#else
+    int_mv single_newmv[TOTAL_REFS_PER_FRAME],
+#endif  // CONFIG_EXT_INTER
+    INTERP_FILTER (*single_filter)[TOTAL_REFS_PER_FRAME],
+    int (*single_skippable)[TOTAL_REFS_PER_FRAME], int64_t *psse,
+    const int64_t ref_best_rd) {
+  VP10_COMMON *cm = &cpi->common;
+  MACROBLOCKD *xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+  const int is_comp_pred = has_second_ref(mbmi);
+  const int this_mode = mbmi->mode;
+  int_mv *frame_mv = mode_mv[this_mode];
+  int i;
+  int refs[2] = { mbmi->ref_frame[0],
+                  (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+  int_mv cur_mv[2];
+  int rate_mv = 0;
+#if CONFIG_EXT_INTER
+  const int bw = 4 * num_4x4_blocks_wide_lookup[bsize];
+  int mv_idx = (this_mode == NEWFROMNEARMV) ? 1 : 0;
+  int_mv single_newmv[TOTAL_REFS_PER_FRAME];
+  const unsigned int *const interintra_mode_cost =
+      cpi->interintra_mode_cost[size_group_lookup[bsize]];
+  const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
+#if CONFIG_REF_MV
+  uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+#endif
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf_[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+#else
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf_[MAX_MB_PLANE * MAX_SB_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  uint8_t *tmp_buf;
+
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+  int allow_motvar =
+#if CONFIG_EXT_INTER
+      !is_comp_interintra_pred &&
+#endif  // CONFIG_EXT_INTER
+      is_motvar_allowed(mbmi);
+  int rate2_nocoeff = 0, best_rate2 = INT_MAX, best_skippable, best_xskip,
+      best_disable_skip = 0;
+  int best_rate_y, best_rate_uv;
+#if CONFIG_VAR_TX
+  uint8_t best_blk_skip[MAX_MB_PLANE][MAX_MIB_SIZE * MAX_MIB_SIZE * 4];
+#endif  // CONFIG_VAR_TX
+  int64_t best_distortion = INT64_MAX;
+  MB_MODE_INFO best_mbmi;
+#if CONFIG_EXT_INTER
+  int rate2_bmc_nocoeff;
+  int rate_mv_bmc;
+  MB_MODE_INFO best_bmc_mbmi;
+#endif  // CONFIG_EXT_INTER
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
+  int pred_exists = 0;
+  int intpel_mv;
+  int64_t rd, tmp_rd, best_rd = INT64_MAX;
+  int best_needs_copy = 0;
+  uint8_t *orig_dst[MAX_MB_PLANE];
+  int orig_dst_stride[MAX_MB_PLANE];
+  int rs = 0;
+#if CONFIG_DUAL_FILTER
+  // Index use case:
+  // {0, 1} -> (vertical, horizontal) filter types for the first ref frame
+  // {2, 3} -> (vertical, horizontal) filter types for the second ref frame
+  INTERP_FILTER best_filter[4] = {
+    SWITCHABLE, SWITCHABLE, SWITCHABLE, SWITCHABLE,
+  };
+#else
+  INTERP_FILTER best_filter = SWITCHABLE;
+#endif
+
+  int skip_txfm_sb = 0;
+  int64_t skip_sse_sb = INT64_MAX;
+  int64_t distortion_y = 0, distortion_uv = 0;
+  int16_t mode_ctx = mbmi_ext->mode_context[refs[0]];
+
+#if CONFIG_EXT_INTER
+  *compmode_interintra_cost = 0;
+  mbmi->use_wedge_interintra = 0;
+  *compmode_wedge_cost = 0;
+  mbmi->use_wedge_interinter = 0;
+
+  // is_comp_interintra_pred implies !is_comp_pred
+  assert(!is_comp_interintra_pred || (!is_comp_pred));
+  // is_comp_interintra_pred implies is_interintra_allowed(mbmi->sb_type)
+  assert(!is_comp_interintra_pred || is_interintra_allowed(mbmi));
+#endif  // CONFIG_EXT_INTER
+
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+  if (is_comp_pred)
+    mode_ctx = mbmi_ext->compound_mode_context[refs[0]];
+  else
+#endif  // CONFIG_EXT_INTER
+    mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
+                                          mbmi->ref_frame, bsize, -1);
+#endif
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+    tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf_);
+  else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    tmp_buf = tmp_buf_;
+
+  if (is_comp_pred) {
+    if (frame_mv[refs[0]].as_int == INVALID_MV ||
+        frame_mv[refs[1]].as_int == INVALID_MV)
+      return INT64_MAX;
+  }
+
+  if (have_newmv_in_inter_mode(this_mode)) {
+    if (is_comp_pred) {
+#if CONFIG_EXT_INTER
+      for (i = 0; i < 2; ++i) {
+        single_newmv[refs[i]].as_int = single_newmvs[mv_idx][refs[i]].as_int;
+      }
+
+      if (this_mode == NEW_NEWMV) {
+        frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+        frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+        if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
+          joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL,
+                              single_newmv, &rate_mv, 0);
+        } else {
+#if CONFIG_REF_MV
+          vp10_set_mvcost(x, mbmi->ref_frame[0]);
+#endif  // CONFIG_REF_MV
+          rate_mv = vp10_mv_bit_cost(
+              &frame_mv[refs[0]].as_mv, &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+              x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+#if CONFIG_REF_MV
+          vp10_set_mvcost(x, mbmi->ref_frame[1]);
+#endif  // CONFIG_REF_MV
+          rate_mv += vp10_mv_bit_cost(
+              &frame_mv[refs[1]].as_mv, &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+              x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+        }
+      } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
+        frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+      } else {
+        frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+      }
+#else
+      // Initialize mv using single prediction mode result.
+      frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
+      frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
+
+      if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
+        joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
+                            single_newmv, &rate_mv, 0);
+      } else {
+#if CONFIG_REF_MV
+        vp10_set_mvcost(x, mbmi->ref_frame[0]);
+#endif  // CONFIG_REF_MV
+        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
+                                   x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+#if CONFIG_REF_MV
+        vp10_set_mvcost(x, mbmi->ref_frame[1]);
+#endif  // CONFIG_REF_MV
+        rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                    &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+      }
+#endif  // CONFIG_EXT_INTER
+    } else {
+#if CONFIG_EXT_INTER
+      if (is_comp_interintra_pred) {
+        x->best_mv = single_newmvs[mv_idx][refs[0]];
+        rate_mv = single_newmvs_rate[mv_idx][refs[0]];
+      } else {
+        single_motion_search(cpi, x, bsize, mi_row, mi_col, 0, mv_idx,
+                             &rate_mv);
+        single_newmvs[mv_idx][refs[0]] = x->best_mv;
+        single_newmvs_rate[mv_idx][refs[0]] = rate_mv;
+      }
+#else
+      single_motion_search(cpi, x, bsize, mi_row, mi_col, &rate_mv);
+      single_newmv[refs[0]] = x->best_mv;
+#endif  // CONFIG_EXT_INTER
+
+      if (x->best_mv.as_int == INVALID_MV) return INT64_MAX;
+
+      frame_mv[refs[0]] = x->best_mv;
+      xd->mi[0]->bmi[0].as_mv[0] = x->best_mv;
+
+      // Estimate the rate implications of a new mv but discount this
+      // under certain circumstances where we want to help initiate a weak
+      // motion field, where the distortion gain for a single block may not
+      // be enough to overcome the cost of a new mv.
+      if (discount_newmv_test(cpi, this_mode, x->best_mv, mode_mv, refs[0])) {
+        rate_mv = VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+      }
+    }
+    *rate2 += rate_mv;
+  }
+
+  for (i = 0; i < is_comp_pred + 1; ++i) {
+    cur_mv[i] = frame_mv[refs[i]];
+// Clip "next_nearest" so that it does not extend to far out of image
+#if CONFIG_EXT_INTER
+    if (this_mode != NEWMV && this_mode != NEWFROMNEARMV)
+#else
+    if (this_mode != NEWMV)
+#endif  // CONFIG_EXT_INTER
+      clamp_mv2(&cur_mv[i].as_mv, xd);
+
+    if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
+    mbmi->mv[i].as_int = cur_mv[i].as_int;
+  }
+
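+  // With CONFIG_REF_MV the nearest/near candidates come from the dynamic
+  // reference-MV stack rather than mode_mv[], so re-derive, re-clamp and
+  // re-validate them here for the affected modes.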
+#if CONFIG_REF_MV
+#if CONFIG_EXT_INTER
+  if (this_mode == NEAREST_NEARESTMV) {
+#else
+  if (this_mode == NEARESTMV && is_comp_pred) {
+    uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+#endif  // CONFIG_EXT_INTER
+    if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
+      cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv;
+      cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][0].comp_mv;
+
+      for (i = 0; i < 2; ++i) {
+        clamp_mv2(&cur_mv[i].as_mv, xd);
+        if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
+        mbmi->mv[i].as_int = cur_mv[i].as_int;
+      }
+    }
+  }
+
+#if CONFIG_EXT_INTER
+  if (mbmi_ext->ref_mv_count[ref_frame_type] > 0) {
+    if (this_mode == NEAREST_NEWMV || this_mode == NEAREST_NEARMV) {
+      cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv;
+
+      lower_mv_precision(&cur_mv[0].as_mv, cm->allow_high_precision_mv);
+      clamp_mv2(&cur_mv[0].as_mv, xd);
+      if (mv_check_bounds(x, &cur_mv[0].as_mv)) return INT64_MAX;
+      mbmi->mv[0].as_int = cur_mv[0].as_int;
+    }
+
+    if (this_mode == NEW_NEARESTMV || this_mode == NEAR_NEARESTMV) {
+      cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][0].comp_mv;
+
+      lower_mv_precision(&cur_mv[1].as_mv, cm->allow_high_precision_mv);
+      clamp_mv2(&cur_mv[1].as_mv, xd);
+      if (mv_check_bounds(x, &cur_mv[1].as_mv)) return INT64_MAX;
+      mbmi->mv[1].as_int = cur_mv[1].as_int;
+    }
+  }
+
+  if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
+    if (this_mode == NEAR_NEWMV || this_mode == NEAR_NEARESTMV ||
+        this_mode == NEAR_NEARMV) {
+      cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][1].this_mv;
+
+      lower_mv_precision(&cur_mv[0].as_mv, cm->allow_high_precision_mv);
+      clamp_mv2(&cur_mv[0].as_mv, xd);
+      if (mv_check_bounds(x, &cur_mv[0].as_mv)) return INT64_MAX;
+      mbmi->mv[0].as_int = cur_mv[0].as_int;
+    }
+
+    if (this_mode == NEW_NEARMV || this_mode == NEAREST_NEARMV ||
+        this_mode == NEAR_NEARMV) {
+      cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][1].comp_mv;
+
+      lower_mv_precision(&cur_mv[1].as_mv, cm->allow_high_precision_mv);
+      clamp_mv2(&cur_mv[1].as_mv, xd);
+      if (mv_check_bounds(x, &cur_mv[1].as_mv)) return INT64_MAX;
+      mbmi->mv[1].as_int = cur_mv[1].as_int;
+    }
+  }
+#else
+  if (this_mode == NEARMV && is_comp_pred) {
+    uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+    if (mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
+      int ref_mv_idx = mbmi->ref_mv_idx + 1;
+      cur_mv[0] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
+      cur_mv[1] = mbmi_ext->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
+
+      for (i = 0; i < 2; ++i) {
+        clamp_mv2(&cur_mv[i].as_mv, xd);
+        if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
+        mbmi->mv[i].as_int = cur_mv[i].as_int;
+      }
+    }
+  }
+#endif  // CONFIG_EXT_INTER
+#endif  // CONFIG_REF_MV
+
+  // Do the first prediction into the destination buffer, and the next
+  // prediction into a temporary buffer. Then keep track of which one
+  // of these currently holds the best predictor, and use the other
+  // one for future predictions. In the end, copy from tmp_buf to
+  // dst if necessary.
+  for (i = 0; i < MAX_MB_PLANE; i++) {
+    orig_dst[i] = xd->plane[i].dst.buf;
+    orig_dst_stride[i] = xd->plane[i].dst.stride;
+  }
+
+  // We don't include the cost of the second reference here, because there
+  // are only three reference pairs (Last/Golden, ARF/Last, Golden/ARF):
+  // presented in that order, the second reference is always determined
+  // once the first is known.
+  //
+  // Under some circumstances we discount the cost of new mv mode to encourage
+  // initiation of a motion field.
+  if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
+                          refs[0])) {
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx),
+                     cost_mv_ref(cpi, NEARESTMV, is_comp_pred, mode_ctx));
+#else
+    *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, mode_ctx),
+                     cost_mv_ref(cpi, NEARESTMV, mode_ctx));
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+  } else {
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    *rate2 += cost_mv_ref(cpi, this_mode, is_comp_pred, mode_ctx);
+#else
+    *rate2 += cost_mv_ref(cpi, this_mode, mode_ctx);
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+  }
+
+  if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
+#if CONFIG_EXT_INTER
+      mbmi->mode != NEARESTMV && mbmi->mode != NEAREST_NEARESTMV
+#else
+      mbmi->mode != NEARESTMV
+#endif  // CONFIG_EXT_INTER
+      )
+    return INT64_MAX;
+
+  pred_exists = 0;
+  // Are all MVs integer-pel for Y and UV?
+  intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
+  if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
+
+#if !CONFIG_DUAL_FILTER
+  best_filter =
+      predict_interp_filter(cpi, x, bsize, mi_row, mi_col, single_filter);
+#endif
+
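+  // Interpolation filter search: build the prediction with each candidate
+  // filter (or dual-filter combination), model its rd cost, and keep the
+  // best. When all MVs are integer-pel, every interpolating filter yields
+  // the same prediction, so the rate/distortion modeled for the first
+  // filter is reused for the others.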
+  if (cm->interp_filter != BILINEAR) {
+    int newbest;
+    int tmp_rate_sum = 0;
+    int64_t tmp_dist_sum = 0;
+
+#if CONFIG_DUAL_FILTER
+#if CONFIG_EXT_INTERP
+    for (i = 0; i < 25; ++i) {
+#else
+    for (i = 0; i < 9; ++i) {
+#endif
+#else
+    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+#endif
+      int j;
+      int64_t rs_rd;
+      int tmp_skip_sb = 0;
+      int64_t tmp_skip_sse = INT64_MAX;
+
+#if CONFIG_DUAL_FILTER
+      mbmi->interp_filter[0] = filter_sets[i][0];
+      mbmi->interp_filter[1] = filter_sets[i][1];
+      mbmi->interp_filter[2] = filter_sets[i][0];
+      mbmi->interp_filter[3] = filter_sets[i][1];
+#else
+      mbmi->interp_filter = i;
+#endif
+      rs = vp10_get_switchable_rate(cpi, xd);
+      rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
+
+      if (i > 0 && intpel_mv && IsInterpolatingFilter(i)) {
+        rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
+        if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
+      } else {
+        int rate_sum = 0;
+        int64_t dist_sum = 0;
+
+        if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
+            (cpi->sf.interp_filter_search_mask & (1 << i))) {
+          rate_sum = INT_MAX;
+          dist_sum = INT64_MAX;
+          continue;
+        }
+
+        if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
+#if CONFIG_EXT_INTER
+            is_comp_interintra_pred ||
+#endif  // CONFIG_EXT_INTER
+            (cm->interp_filter != SWITCHABLE &&
+             (
+#if CONFIG_DUAL_FILTER
+                 cm->interp_filter == mbmi->interp_filter[0]
+#else
+                 cm->interp_filter == mbmi->interp_filter
+#endif
+                 || (i == 0 && intpel_mv && IsInterpolatingFilter(i))))) {
+          restore_dst_buf(xd, orig_dst, orig_dst_stride);
+        } else {
+          for (j = 0; j < MAX_MB_PLANE; j++) {
+            xd->plane[j].dst.buf = tmp_buf + j * MAX_SB_SQUARE;
+            xd->plane[j].dst.stride = MAX_SB_SIZE;
+          }
+        }
+        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+        model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &rate_sum,
+                        &dist_sum, &tmp_skip_sb, &tmp_skip_sse);
+
+        rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
+        if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
+
+        if (i == 0 && intpel_mv && IsInterpolatingFilter(i)) {
+          tmp_rate_sum = rate_sum;
+          tmp_dist_sum = dist_sum;
+        }
+      }
+
+      if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
+        if (rd / 2 > ref_best_rd) {
+          restore_dst_buf(xd, orig_dst, orig_dst_stride);
+          return INT64_MAX;
+        }
+      }
+      newbest = i == 0 || rd < best_rd;
+
+      if (newbest) {
+        best_rd = rd;
+#if CONFIG_DUAL_FILTER
+        best_filter[0] = mbmi->interp_filter[0];
+        best_filter[1] = mbmi->interp_filter[1];
+        best_filter[2] = mbmi->interp_filter[2];
+        best_filter[3] = mbmi->interp_filter[3];
+#else
+        best_filter = mbmi->interp_filter;
+#endif
+        if (cm->interp_filter == SWITCHABLE && i &&
+            !(intpel_mv && IsInterpolatingFilter(i)))
+          best_needs_copy = !best_needs_copy;
+      }
+
+      if ((cm->interp_filter == SWITCHABLE && newbest) ||
+          (cm->interp_filter != SWITCHABLE &&
+#if CONFIG_DUAL_FILTER
+           cm->interp_filter == mbmi->interp_filter[0])) {
+#else
+           cm->interp_filter == mbmi->interp_filter)) {
+#endif
+        pred_exists = 1;
+        tmp_rd = best_rd;
+
+        skip_txfm_sb = tmp_skip_sb;
+        skip_sse_sb = tmp_skip_sse;
+      } else {
+        pred_exists = 0;
+      }
+    }
+    restore_dst_buf(xd, orig_dst, orig_dst_stride);
+  }
+
+// Set the appropriate filter
+#if CONFIG_DUAL_FILTER
+  mbmi->interp_filter[0] =
+      cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter[0];
+  mbmi->interp_filter[1] =
+      cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter[1];
+  if (mbmi->ref_frame[1] > INTRA_FRAME) {
+    mbmi->interp_filter[2] =
+        cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter[2];
+    mbmi->interp_filter[3] =
+        cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter[3];
+  }
+#else
+  mbmi->interp_filter =
+      cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
+#endif
+  rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
+
+#if CONFIG_EXT_INTER
+#if CONFIG_OBMC
+  best_bmc_mbmi = *mbmi;
+  rate_mv_bmc = rate_mv;
+  rate2_bmc_nocoeff = *rate2;
+  if (cm->interp_filter == SWITCHABLE) rate2_bmc_nocoeff += rs;
+#endif  // CONFIG_OBMC
+
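+  // Inter-inter wedge search: first measure the rd cost without a wedge;
+  // if that cost looks promising, pick the best wedge partition from the
+  // two single-reference predictions and, for new-mv modes, refine the
+  // motion vectors with a masked motion search before deciding.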
+  if (is_comp_pred && is_interinter_wedge_used(bsize)) {
+    int rate_sum, rs;
+    int64_t dist_sum;
+    int64_t best_rd_nowedge = INT64_MAX;
+    int64_t best_rd_wedge = INT64_MAX;
+    int tmp_skip_txfm_sb;
+    int64_t tmp_skip_sse_sb;
+
+    rs = vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+    mbmi->use_wedge_interinter = 0;
+    vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+    vp10_subtract_plane(x, bsize, 0);
+    rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
+                             &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
+    if (rd != INT64_MAX)
+      rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+    best_rd_nowedge = rd;
+
+    // Disable wedge search if the source variance is small.
+    if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
+        best_rd_nowedge / 3 < ref_best_rd) {
+      uint8_t pred0[2 * MAX_SB_SQUARE];
+      uint8_t pred1[2 * MAX_SB_SQUARE];
+      uint8_t *preds0[1] = { pred0 };
+      uint8_t *preds1[1] = { pred1 };
+      int strides[1] = { bw };
+
+      mbmi->use_wedge_interinter = 1;
+      rs = vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
+           vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+
+      vp10_build_inter_predictors_for_planes_single_buf(
+          xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
+      vp10_build_inter_predictors_for_planes_single_buf(
+          xd, bsize, 0, 0, mi_row, mi_col, 1, preds1, strides);
+
+      // Choose the best wedge
+      best_rd_wedge = pick_interinter_wedge(cpi, x, bsize, pred0, pred1);
+      best_rd_wedge += RDCOST(x->rdmult, x->rddiv, rs + rate_mv, 0);
+
+      if (have_newmv_in_inter_mode(this_mode)) {
+        int_mv tmp_mv[2];
+        int rate_mvs[2], tmp_rate_mv = 0;
+        if (this_mode == NEW_NEWMV) {
+          int mv_idxs[2] = { 0, 0 };
+          do_masked_motion_search_indexed(
+              cpi, x, mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
+              bsize, mi_row, mi_col, tmp_mv, rate_mvs, mv_idxs, 2);
+          tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
+          mbmi->mv[0].as_int = tmp_mv[0].as_int;
+          mbmi->mv[1].as_int = tmp_mv[1].as_int;
+        } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
+          int mv_idxs[2] = { 0, 0 };
+          do_masked_motion_search_indexed(
+              cpi, x, mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
+              bsize, mi_row, mi_col, tmp_mv, rate_mvs, mv_idxs, 0);
+          tmp_rate_mv = rate_mvs[0];
+          mbmi->mv[0].as_int = tmp_mv[0].as_int;
+        } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
+          int mv_idxs[2] = { 0, 0 };
+          do_masked_motion_search_indexed(
+              cpi, x, mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
+              bsize, mi_row, mi_col, tmp_mv, rate_mvs, mv_idxs, 1);
+          tmp_rate_mv = rate_mvs[1];
+          mbmi->mv[1].as_int = tmp_mv[1].as_int;
+        }
+        vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+        model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
+                        &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+        rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
+        if (rd < best_rd_wedge) {
+          best_rd_wedge = rd;
+        } else {
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+          mbmi->mv[1].as_int = cur_mv[1].as_int;
+          tmp_rate_mv = rate_mv;
+          vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+                                                    strides, preds1, strides);
+        }
+        vp10_subtract_plane(x, bsize, 0);
+        rd =
+            estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
+                                &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
+        if (rd != INT64_MAX)
+          rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum,
+                      dist_sum);
+        best_rd_wedge = rd;
+
+        if (best_rd_wedge < best_rd_nowedge) {
+          mbmi->use_wedge_interinter = 1;
+          xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
+          xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
+          *rate2 += tmp_rate_mv - rate_mv;
+          rate_mv = tmp_rate_mv;
+        } else {
+          mbmi->use_wedge_interinter = 0;
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+          mbmi->mv[1].as_int = cur_mv[1].as_int;
+          xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
+          xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
+        }
+      } else {
+        vp10_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0,
+                                                  strides, preds1, strides);
+        vp10_subtract_plane(x, bsize, 0);
+        rd =
+            estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
+                                &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
+        if (rd != INT64_MAX)
+          rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+        best_rd_wedge = rd;
+        if (best_rd_wedge < best_rd_nowedge) {
+          mbmi->use_wedge_interinter = 1;
+        } else {
+          mbmi->use_wedge_interinter = 0;
+        }
+      }
+    }
+    if (ref_best_rd < INT64_MAX &&
+        VPXMIN(best_rd_wedge, best_rd_nowedge) / 3 > ref_best_rd)
+      return INT64_MAX;
+
+    pred_exists = 0;
+    tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
+
+    if (mbmi->use_wedge_interinter)
+      *compmode_wedge_cost =
+          vp10_cost_literal(get_interinter_wedge_bits(bsize)) +
+          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+    else
+      *compmode_wedge_cost =
+          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
+  }
+
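+  // Inter-intra search: build the inter prediction once, score every
+  // interintra mode with a model-based rd estimate, re-evaluate the winner
+  // against a true rd estimate, and finally consider an interintra wedge
+  // (with optional MV refinement) where the block size supports one.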
+  if (is_comp_interintra_pred) {
+    INTERINTRA_MODE best_interintra_mode = II_DC_PRED;
+    int64_t best_interintra_rd = INT64_MAX;
+    int rmode, rate_sum;
+    int64_t dist_sum;
+    int j;
+    int64_t best_interintra_rd_nowedge = INT64_MAX;
+    int64_t best_interintra_rd_wedge = INT64_MAX;
+    int rwedge;
+    int_mv tmp_mv;
+    int tmp_rate_mv = 0;
+    int tmp_skip_txfm_sb;
+    int64_t tmp_skip_sse_sb;
+    DECLARE_ALIGNED(16, uint8_t, intrapred_[2 * MAX_SB_SQUARE]);
+    uint8_t *intrapred;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+      intrapred = CONVERT_TO_BYTEPTR(intrapred_);
+    else
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      intrapred = intrapred_;
+
+    mbmi->ref_frame[1] = NONE;
+    for (j = 0; j < MAX_MB_PLANE; j++) {
+      xd->plane[j].dst.buf = tmp_buf + j * MAX_SB_SQUARE;
+      xd->plane[j].dst.stride = bw;
+    }
+    vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+    restore_dst_buf(xd, orig_dst, orig_dst_stride);
+    mbmi->ref_frame[1] = INTRA_FRAME;
+    mbmi->use_wedge_interintra = 0;
+
+    for (j = 0; j < INTERINTRA_MODES; ++j) {
+      mbmi->interintra_mode = (INTERINTRA_MODE)j;
+      rmode = interintra_mode_cost[mbmi->interintra_mode];
+      vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+      vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+      model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
+                      &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+      rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
+      if (rd < best_interintra_rd) {
+        best_interintra_rd = rd;
+        best_interintra_mode = mbmi->interintra_mode;
+      }
+    }
+    mbmi->interintra_mode = best_interintra_mode;
+    rmode = interintra_mode_cost[mbmi->interintra_mode];
+    vp10_build_intra_predictors_for_interintra(xd, bsize, 0, intrapred, bw);
+    vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+    vp10_subtract_plane(x, bsize, 0);
+    rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
+                             &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
+    if (rd != INT64_MAX)
+      rd = RDCOST(x->rdmult, x->rddiv, rate_mv + rmode + rate_sum, dist_sum);
+    best_interintra_rd = rd;
+
+    if (ref_best_rd < INT64_MAX && best_interintra_rd > 2 * ref_best_rd) {
+      return INT64_MAX;
+    }
+    if (is_interintra_wedge_used(bsize)) {
+      rwedge = vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 0);
+      if (rd != INT64_MAX)
+        rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_mv + rwedge + rate_sum,
+                    dist_sum);
+      best_interintra_rd_nowedge = rd;
+
+      // Disable wedge search if the source variance is small.
+      if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
+        mbmi->use_wedge_interintra = 1;
+
+        rwedge = vp10_cost_literal(get_interintra_wedge_bits(bsize)) +
+                 vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
+
+        best_interintra_rd_wedge =
+            pick_interintra_wedge(cpi, x, bsize, intrapred_, tmp_buf_);
+
+        best_interintra_rd_wedge +=
+            RDCOST(x->rdmult, x->rddiv, rmode + rate_mv + rwedge, 0);
+        // Refine motion vector.
+        if (have_newmv_in_inter_mode(this_mode)) {
+          // Get the inverted (complement) mask.
+          const uint8_t *mask = vp10_get_contiguous_soft_mask(
+              mbmi->interintra_wedge_index, 1, bsize);
+          do_masked_motion_search(cpi, x, mask, bw, bsize, mi_row, mi_col,
+                                  &tmp_mv, &tmp_rate_mv, 0, mv_idx);
+          mbmi->mv[0].as_int = tmp_mv.as_int;
+          vp10_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+          model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
+                          &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+          rd = RDCOST(x->rdmult, x->rddiv,
+                      rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
+          if (rd < best_interintra_rd_wedge) {
+            best_interintra_rd_wedge = rd;
+          } else {
+            tmp_mv.as_int = cur_mv[0].as_int;
+            tmp_rate_mv = rate_mv;
+          }
+        } else {
+          tmp_mv.as_int = cur_mv[0].as_int;
+          tmp_rate_mv = rate_mv;
+          vp10_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
+        }
+        // Evaluate closer to true rd
+        vp10_subtract_plane(x, bsize, 0);
+        rd =
+            estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
+                                &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
+        if (rd != INT64_MAX)
+          rd = RDCOST(x->rdmult, x->rddiv,
+                      rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
+        best_interintra_rd_wedge = rd;
+        if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
+          mbmi->use_wedge_interintra = 1;
+          best_interintra_rd = best_interintra_rd_wedge;
+          mbmi->mv[0].as_int = tmp_mv.as_int;
+          *rate2 += tmp_rate_mv - rate_mv;
+          rate_mv = tmp_rate_mv;
+        } else {
+          mbmi->use_wedge_interintra = 0;
+          best_interintra_rd = best_interintra_rd_nowedge;
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+        }
+      } else {
+        mbmi->use_wedge_interintra = 0;
+        best_interintra_rd = best_interintra_rd_nowedge;
+      }
+    }
+
+    pred_exists = 0;
+    tmp_rd = best_interintra_rd;
+    *compmode_interintra_cost =
+        vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 1);
+    *compmode_interintra_cost += interintra_mode_cost[mbmi->interintra_mode];
+    if (is_interintra_wedge_used(bsize)) {
+      *compmode_interintra_cost += vp10_cost_bit(
+          cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
+      if (mbmi->use_wedge_interintra) {
+        *compmode_interintra_cost +=
+            vp10_cost_literal(get_interintra_wedge_bits(bsize));
+      }
+    }
+  } else if (is_interintra_allowed(mbmi)) {
+    *compmode_interintra_cost =
+        vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
+  }
+
+#if CONFIG_EXT_INTERP
+  if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE) {
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = EIGHTTAP_REGULAR;
+#else
+    mbmi->interp_filter = EIGHTTAP_REGULAR;
+#endif
+    pred_exists = 0;
+  }
+#endif  // CONFIG_EXT_INTERP
+#endif  // CONFIG_EXT_INTER
+
+  if (pred_exists) {
+    if (best_needs_copy) {
+      // Again temporarily point the buffers at local memory to avoid a
+      // memcpy.
+      for (i = 0; i < MAX_MB_PLANE; i++) {
+        xd->plane[i].dst.buf = tmp_buf + i * MAX_SB_SQUARE;
+        xd->plane[i].dst.stride = MAX_SB_SIZE;
+      }
+    }
+    rd = tmp_rd;
+  } else {
+    int tmp_rate;
+    int64_t tmp_dist;
+
+    // Handles the special case where a filter that is not in the
+    // switchable list (e.g. bilinear) is indicated at the frame level, or
+    // the skip condition holds.
+    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+    model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
+                    &tmp_dist, &skip_txfm_sb, &skip_sse_sb);
+    rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
+  }
+
+#if CONFIG_DUAL_FILTER
+  if (!is_comp_pred) single_filter[this_mode][refs[0]] = mbmi->interp_filter[0];
+#else
+  if (!is_comp_pred) single_filter[this_mode][refs[0]] = mbmi->interp_filter;
+#endif
+
+#if CONFIG_EXT_INTER
+  if (modelled_rd != NULL) {
+    if (is_comp_pred) {
+      const int mode0 = compound_ref0_mode(this_mode);
+      const int mode1 = compound_ref1_mode(this_mode);
+      int64_t mrd =
+          VPXMIN(modelled_rd[mode0][refs[0]], modelled_rd[mode1][refs[1]]);
+      if (rd / 4 * 3 > mrd && ref_best_rd < INT64_MAX) {
+        restore_dst_buf(xd, orig_dst, orig_dst_stride);
+        return INT64_MAX;
+      }
+    } else if (!is_comp_interintra_pred) {
+      modelled_rd[this_mode][refs[0]] = rd;
+    }
+  }
+#endif  // CONFIG_EXT_INTER
+
+  if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
+    // If the current modeled rd (based on pred_error) is substantially
+    // worse than the best so far, do not bother doing a full rd search.
+    if (rd / 2 > ref_best_rd) {
+      restore_dst_buf(xd, orig_dst, orig_dst_stride);
+      return INT64_MAX;
+    }
+  }
+
+  if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
+#if CONFIG_OBMC
+  rate2_nocoeff = *rate2;
+#endif  // CONFIG_OBMC
+
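+  // Motion-variation search: re-evaluate this mode under each allowed
+  // variation (simple translation, then OBMC and/or warped motion),
+  // rebuilding the prediction and rd cost each time, and keep the best.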
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+  best_rd = INT64_MAX;
+  for (mbmi->motion_variation = SIMPLE_TRANSLATION;
+       mbmi->motion_variation < (allow_motvar ? MOTION_VARIATIONS : 1);
+       mbmi->motion_variation++) {
+    int64_t tmp_rd;
+#if CONFIG_EXT_INTER
+    int tmp_rate2 = mbmi->motion_variation != SIMPLE_TRANSLATION
+                        ? rate2_bmc_nocoeff
+                        : rate2_nocoeff;
+#else
+    int tmp_rate2 = rate2_nocoeff;
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+    INTERP_FILTER obmc_interp_filter[2][2] = {
+      { mbmi->interp_filter[0], mbmi->interp_filter[1] },  // obmc == 0
+      { mbmi->interp_filter[0], mbmi->interp_filter[1] }   // obmc == 1
+    };
+#else
+    INTERP_FILTER obmc_interp_filter[2] = {
+      mbmi->interp_filter,  // obmc == 0
+      mbmi->interp_filter   // obmc == 1
+    };
+#endif  // CONFIG_DUAL_FILTER
+#endif  // CONFIG_EXT_INTERP
+
+#if CONFIG_OBMC
+    int tmp_rate;
+    int64_t tmp_dist;
+    if (mbmi->motion_variation == OBMC_CAUSAL) {
+#if CONFIG_EXT_INTER
+      *mbmi = best_bmc_mbmi;
+      mbmi->motion_variation = OBMC_CAUSAL;
+#endif  // CONFIG_EXT_INTER
+      if (!is_comp_pred && have_newmv_in_inter_mode(this_mode)) {
+        int_mv tmp_mv;
+        int_mv pred_mv;
+        int tmp_rate_mv = 0;
+
+        pred_mv.as_int = mbmi->mv[0].as_int;
+        single_motion_search_obmc(cpi, x, bsize, mi_row, mi_col, wsrc, mask2d,
+#if CONFIG_EXT_INTER
+                                  0, mv_idx,
+#endif  // CONFIG_EXT_INTER
+                                  &tmp_mv, pred_mv, &tmp_rate_mv);
+        mbmi->mv[0].as_int = tmp_mv.as_int;
+        if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
+          tmp_rate_mv = VPXMAX((tmp_rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
+        }
+#if CONFIG_EXT_INTER
+        tmp_rate2 = rate2_bmc_nocoeff - rate_mv_bmc + tmp_rate_mv;
+#else
+        tmp_rate2 = rate2_nocoeff - rate_mv + tmp_rate_mv;
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+        if (!has_subpel_mv_component(xd->mi[0], xd, 0))
+          obmc_interp_filter[1][0] = mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
+        if (!has_subpel_mv_component(xd->mi[0], xd, 1))
+          obmc_interp_filter[1][1] = mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
+#else
+        if (!vp10_is_interp_needed(xd))
+          obmc_interp_filter[1] = mbmi->interp_filter = EIGHTTAP_REGULAR;
+#endif  // CONFIG_DUAL_FILTER
+        // This is not quite correct with CONFIG_DUAL_FILTER when a filter
+        // is needed in only one direction.
+        if (!vp10_is_interp_needed(xd)) tmp_rate2 -= rs;
+#endif  // CONFIG_EXT_INTERP
+        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+#if CONFIG_EXT_INTER
+      } else {
+        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+#endif  // CONFIG_EXT_INTER
+      }
+      vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                       dst_stride1, dst_buf2, dst_stride2);
+      model_rd_for_sb(cpi, bsize, x, xd, 0, MAX_MB_PLANE - 1, &tmp_rate,
+                      &tmp_dist, &skip_txfm_sb, &skip_sse_sb);
+    }
+#endif  // CONFIG_OBMC
+
+#if CONFIG_WARPED_MOTION
+    if (mbmi->motion_variation == WARPED_CAUSAL) {
+      // TODO(yuec): Add code
+    }
+#endif  // CONFIG_WARPED_MOTION
+    x->skip = 0;
+
+    *rate2 = tmp_rate2;
+    if (allow_motvar) *rate2 += cpi->motvar_cost[bsize][mbmi->motion_variation];
+    *distortion = 0;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
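+    // Compute the true token-level rate and distortion: luma first, then
+    // chroma with the rd budget that remains after the luma cost; bail out
+    // with INT_MAX if either exceeds the best rd seen so far.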
+    if (!skip_txfm_sb) {
+      int skippable_y, skippable_uv;
+      int64_t sseuv = INT64_MAX;
+      int64_t rdcosty = INT64_MAX;
+
+      // Y cost and distortion
+      vp10_subtract_plane(x, bsize, 0);
+#if CONFIG_VAR_TX
+      if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
+        select_tx_type_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
+                           bsize, ref_best_rd);
+      } else {
+        int idx, idy;
+        super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
+                        bsize, ref_best_rd);
+        for (idy = 0; idy < xd->n8_h; ++idy)
+          for (idx = 0; idx < xd->n8_w; ++idx)
+            mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
+        memset(x->blk_skip[0], skippable_y,
+               sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
+      }
+#else
+    super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
+                    ref_best_rd);
+#endif  // CONFIG_VAR_TX
+
+      if (*rate_y == INT_MAX) {
+        *rate2 = INT_MAX;
+        *distortion = INT64_MAX;
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+        if (mbmi->motion_variation != SIMPLE_TRANSLATION) {
+          continue;
+        } else {
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+          restore_dst_buf(xd, orig_dst, orig_dst_stride);
+          return INT64_MAX;
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+        }
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+      }
+
+      *rate2 += *rate_y;
+      *distortion += distortion_y;
+
+      rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+      rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
+
+#if CONFIG_VAR_TX
+      if (!inter_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
+                            &sseuv, bsize, ref_best_rd - rdcosty))
+#else
+    if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
+                          &sseuv, bsize, ref_best_rd - rdcosty))
+#endif  // CONFIG_VAR_TX
+      {
+        *rate2 = INT_MAX;
+        *distortion = INT64_MAX;
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+        continue;
+#else
+      restore_dst_buf(xd, orig_dst, orig_dst_stride);
+      return INT64_MAX;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+      }
+
+      *psse += sseuv;
+      *rate2 += *rate_uv;
+      *distortion += distortion_uv;
+      *skippable = skippable_y && skippable_uv;
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+      if (*skippable) {
+        *rate2 -= *rate_uv + *rate_y;
+        *rate_y = 0;
+        *rate_uv = 0;
+        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        mbmi->skip = 0;
+        // Here mbmi->skip temporarily plays the role of this_skip2.
+      } else if (!xd->lossless[mbmi->segment_id] &&
+                 (RDCOST(x->rdmult, x->rddiv,
+                         *rate_y + *rate_uv +
+                             vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+                         *distortion) >=
+                  RDCOST(x->rdmult, x->rddiv,
+                         vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1),
+                         *psse))) {
+        *rate2 -= *rate_uv + *rate_y;
+        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        *distortion = *psse;
+        *rate_y = 0;
+        *rate_uv = 0;
+        mbmi->skip = 1;
+      } else {
+        *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        mbmi->skip = 0;
+      }
+      *disable_skip = 0;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+    } else {
+      x->skip = 1;
+      *disable_skip = 1;
+
+// The cost of the skip bit needs to be added.
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+      mbmi->skip = 0;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+      *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+
+      *distortion = skip_sse_sb;
+      *psse = skip_sse_sb;
+      *rate_y = 0;
+      *rate_uv = 0;
+      *skippable = 1;
+    }
+
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+    tmp_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
+    if (mbmi->motion_variation == SIMPLE_TRANSLATION || (tmp_rd < best_rd)) {
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+      mbmi->interp_filter[0] = obmc_interp_filter[mbmi->motion_variation][0];
+      mbmi->interp_filter[1] = obmc_interp_filter[mbmi->motion_variation][1];
+#else
+      mbmi->interp_filter = obmc_interp_filter[mbmi->motion_variation];
+#endif  // CONFIG_DUAL_FILTER
+#endif  // CONFIG_EXT_INTERP
+      best_mbmi = *mbmi;
+      best_rd = tmp_rd;
+      best_rate2 = *rate2;
+      best_rate_y = *rate_y;
+      best_rate_uv = *rate_uv;
+#if CONFIG_VAR_TX
+      for (i = 0; i < MAX_MB_PLANE; ++i)
+        memcpy(best_blk_skip[i], x->blk_skip[i],
+               sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
+#endif  // CONFIG_VAR_TX
+      best_distortion = *distortion;
+      best_skippable = *skippable;
+      best_xskip = x->skip;
+      best_disable_skip = *disable_skip;
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        x->recon_variance = vp10_high_get_sby_perpixel_variance(
+            cpi, &xd->plane[0].dst, bsize, xd->bd);
+      } else {
+        x->recon_variance =
+            vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+      }
+#else
+      x->recon_variance =
+          vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    }
+  }
+
+  if (best_rd == INT64_MAX) {
+    *rate2 = INT_MAX;
+    *distortion = INT64_MAX;
+    restore_dst_buf(xd, orig_dst, orig_dst_stride);
+    return INT64_MAX;
+  }
+  *mbmi = best_mbmi;
+  *rate2 = best_rate2;
+  *rate_y = best_rate_y;
+  *rate_uv = best_rate_uv;
+#if CONFIG_VAR_TX
+  for (i = 0; i < MAX_MB_PLANE; ++i)
+    memcpy(x->blk_skip[i], best_blk_skip[i],
+           sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
+#endif  // CONFIG_VAR_TX
+  *distortion = best_distortion;
+  *skippable = best_skippable;
+  x->skip = best_xskip;
+  *disable_skip = best_disable_skip;
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+
+  if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
+
+#if !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    x->recon_variance = vp10_high_get_sby_perpixel_variance(
+        cpi, &xd->plane[0].dst, bsize, xd->bd);
+  } else {
+    x->recon_variance =
+        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+  }
+#else
+  x->recon_variance =
+      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // !(CONFIG_OBMC || CONFIG_WARPED_MOTION)
+
+  restore_dst_buf(xd, orig_dst, orig_dst_stride);
+  return 0;  // The rate-distortion cost will be re-calculated by caller.
+}
+
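+// Picks the best intra mode for a superblock: the luma mode first (per-4x4
+// modes for blocks below 8x8), then the chroma mode at the matching
+// transform size; the returned rate includes the cost of the skip flag.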
+void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                int64_t best_rd) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblockd_plane *const pd = xd->plane;
+  int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
+  int y_skip = 0, uv_skip = 0;
+  int64_t dist_y = 0, dist_uv = 0;
+  TX_SIZE max_uv_tx_size;
+  ctx->skip = 0;
+  xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
+  xd->mi[0]->mbmi.ref_frame[1] = NONE;
+
+  if (bsize >= BLOCK_8X8) {
+    if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
+                               &y_skip, bsize, best_rd) >= best_rd) {
+      rd_cost->rate = INT_MAX;
+      return;
+    }
+  } else {
+    y_skip = 0;
+    if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
+                                     &dist_y, best_rd) >= best_rd) {
+      rd_cost->rate = INT_MAX;
+      return;
+    }
+  }
+  max_uv_tx_size = get_uv_tx_size_impl(
+      xd->mi[0]->mbmi.tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
+  rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly, &dist_uv,
+                          &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
+
+  if (y_skip && uv_skip) {
+    rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
+                    vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+    rd_cost->dist = dist_y + dist_uv;
+  } else {
+    rd_cost->rate =
+        rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+    rd_cost->dist = dist_y + dist_uv;
+  }
+
+  ctx->mic = *xd->mi[0];
+  ctx->mbmi_ext = *x->mbmi_ext;
+  rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
+}
+
+// This function is designed to apply a bias or adjustment to an rd value based
+// on the relative variance of the source and reconstruction.
+#define LOW_VAR_THRESH 16
+#define VLOW_ADJ_MAX 25
+#define VHIGH_ADJ_MAX 8
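+// Note: var_error below works out to
+//   100 - 200*s*r/(s^2 + r^2) = 100*(s - r)^2/(s^2 + r^2),
+// which is 0 when the source (s) and reconstruction (r) variances match and
+// approaches 100 as they diverge; *this_rd is then inflated by var_factor%.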
+static void rd_variance_adjustment(MACROBLOCK *x, int64_t *this_rd,
+                                   MV_REFERENCE_FRAME ref_frame,
+                                   unsigned int source_variance) {
+  unsigned int recon_variance = x->recon_variance;
+  unsigned int absvar_diff = 0;
+  int64_t var_error = 0;
+  int64_t var_factor = 0;
+
+  if (*this_rd == INT64_MAX) return;
+
+  if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
+    absvar_diff = (source_variance > recon_variance)
+                      ? (source_variance - recon_variance)
+                      : (recon_variance - source_variance);
+
+    var_error = ((int64_t)200 * source_variance * recon_variance) /
+                (((int64_t)source_variance * source_variance) +
+                 ((int64_t)recon_variance * recon_variance));
+    var_error = 100 - var_error;
+  }
+
+  // Source variance above a threshold and ref frame is intra.
+  // This case is targeted mainly at discouraging intra modes that give rise
+  // to a predictor with a low spatial complexity compared to the source.
+  if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
+      (source_variance > recon_variance)) {
+    var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
+    // A second possible case of interest is where the source variance
+    // is very low and we wish to discourage false texture or motion trails.
+  } else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
+             (recon_variance > source_variance)) {
+    var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
+  }
+  *this_rd += (*this_rd * var_factor) / 100;
+}
+
+// Do we have an internal image edge (e.g., formatting bars)?
+int vp10_internal_image_edge(VP10_COMP *cpi) {
+  return (cpi->oxcf.pass == 2) &&
+         ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
+          (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
+}
+
+// Checks to see if a super block is on a horizontal image edge.
+// In most cases this is the "real" edge unless there are formatting
+// bars embedded in the stream.
+int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
+  int top_edge = 0;
+  int bottom_edge = cpi->common.mi_rows;
+  int is_active_h_edge = 0;
+
+  // For two-pass encoding, account for any formatting bars detected.
+  if (cpi->oxcf.pass == 2) {
+    TWO_PASS *twopass = &cpi->twopass;
+
+    // The inactive region is specified in MBs not mi units.
+    // The image edge is in the following MB row.
+    top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
+
+    bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
+    bottom_edge = VPXMAX(top_edge, bottom_edge);
+  }
+
+  if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
+      ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
+    is_active_h_edge = 1;
+  }
+  return is_active_h_edge;
+}
+
+// Checks to see if a super block is on a vertical image edge.
+// In most cases this is the "real" edge unless there are formatting
+// bars embedded in the stream.
+int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
+  int left_edge = 0;
+  int right_edge = cpi->common.mi_cols;
+  int is_active_v_edge = 0;
+
+  // For two-pass encoding, account for any formatting bars detected.
+  if (cpi->oxcf.pass == 2) {
+    TWO_PASS *twopass = &cpi->twopass;
+
+    // The inactive region is specified in MBs not mi units.
+    // The image edge is in the following MB column.
+    left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
+
+    right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
+    right_edge = VPXMAX(left_edge, right_edge);
+  }
+
+  if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
+      ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
+    is_active_v_edge = 1;
+  }
+  return is_active_v_edge;
+}
+
+// Checks to see if a super block is at the edge of the active image.
+// In most cases this is the "real" edge unless there are formatting
+// bars embedded in the stream.
+int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
+  return vp10_active_h_edge(cpi, mi_row, cpi->common.mib_size) ||
+         vp10_active_v_edge(cpi, mi_col, cpi->common.mib_size);
+}
+
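+// Rebuilds the chroma color index map of a palette-coded block: gathers the
+// (U, V) sample pairs, reloads the stored palette colors as centroids, and
+// reassigns every pixel to its nearest centroid.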
+static void restore_uv_color_map(VP10_COMP *cpi, MACROBLOCK *x) {
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+  const BLOCK_SIZE bsize = mbmi->sb_type;
+  const int rows =
+      (4 * num_4x4_blocks_high_lookup[bsize]) >> (xd->plane[1].subsampling_y);
+  const int cols =
+      (4 * num_4x4_blocks_wide_lookup[bsize]) >> (xd->plane[1].subsampling_x);
+  int src_stride = x->plane[1].src.stride;
+  const uint8_t *const src_u = x->plane[1].src.buf;
+  const uint8_t *const src_v = x->plane[2].src.buf;
+  float *const data = x->palette_buffer->kmeans_data_buf;
+  float centroids[2 * PALETTE_MAX_SIZE];
+  uint8_t *const color_map = xd->plane[1].color_index_map;
+  int r, c;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const uint16_t *const src_u16 = CONVERT_TO_SHORTPTR(src_u);
+  const uint16_t *const src_v16 = CONVERT_TO_SHORTPTR(src_v);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  (void)cpi;
+
+  for (r = 0; r < rows; ++r) {
+    for (c = 0; c < cols; ++c) {
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (cpi->common.use_highbitdepth) {
+        data[(r * cols + c) * 2] = src_u16[r * src_stride + c];
+        data[(r * cols + c) * 2 + 1] = src_v16[r * src_stride + c];
+      } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        data[(r * cols + c) * 2] = src_u[r * src_stride + c];
+        data[(r * cols + c) * 2 + 1] = src_v[r * src_stride + c];
+#if CONFIG_VP9_HIGHBITDEPTH
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    }
+  }
+
+  for (r = 1; r < 3; ++r) {
+    for (c = 0; c < pmi->palette_size[1]; ++c) {
+      centroids[c * 2 + r - 1] = pmi->palette_colors[r * PALETTE_MAX_SIZE + c];
+    }
+  }
+
+  vp10_calc_indices(data, centroids, color_map, rows * cols,
+                    pmi->palette_size[1], 2);
+}
+
+#if CONFIG_EXT_INTRA
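+// Evaluates a DC_PRED ext-intra (filter-intra) candidate inside the inter
+// mode search and updates the best-mode bookkeeping when its rd cost wins.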
+static void pick_ext_intra_interframe(
+    VP10_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+    int *rate_uv_intra, int *rate_uv_tokenonly, int64_t *dist_uv, int *skip_uv,
+    PREDICTION_MODE *mode_uv, EXT_INTRA_MODE_INFO *ext_intra_mode_info_uv,
+    PALETTE_MODE_INFO *pmi_uv, int8_t *uv_angle_delta, int palette_ctx,
+    int skip_mask, unsigned int *ref_costs_single, int64_t *best_rd,
+    int64_t *best_intra_rd, PREDICTION_MODE *best_intra_mode,
+    int *best_mode_index, int *best_skip2, int *best_mode_skippable,
+#if CONFIG_SUPERTX
+    int *returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+    int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, RD_COST *rd_cost) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+  int rate2 = 0, rate_y = INT_MAX, skippable = 0, rate_uv, rate_dummy, i;
+  int dc_mode_index;
+  const int *const intra_mode_cost = cpi->mbmode_cost[size_group_lookup[bsize]];
+  int64_t distortion2 = 0, distortion_y = 0, this_rd = *best_rd, distortion_uv;
+  TX_SIZE uv_tx;
+
+  for (i = 0; i < MAX_MODES; ++i)
+    if (vp10_mode_order[i].mode == DC_PRED &&
+        vp10_mode_order[i].ref_frame[0] == INTRA_FRAME)
+      break;
+  dc_mode_index = i;
+  assert(i < MAX_MODES);
+
+  // TODO(huisu): use skip_mask for further speedup.
+  (void)skip_mask;
+  mbmi->mode = DC_PRED;
+  mbmi->uv_mode = DC_PRED;
+  mbmi->ref_frame[0] = INTRA_FRAME;
+  mbmi->ref_frame[1] = NONE;
+  if (!rd_pick_ext_intra_sby(cpi, x, &rate_dummy, &rate_y, &distortion_y,
+                             &skippable, bsize, intra_mode_cost[mbmi->mode],
+                             &this_rd, 0))
+    return;
+  if (rate_y == INT_MAX) return;
+
+  uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, xd->plane[1].subsampling_x,
+                              xd->plane[1].subsampling_y);
+  if (rate_uv_intra[uv_tx] == INT_MAX) {
+    choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+                         &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+                         &skip_uv[uv_tx], &mode_uv[uv_tx]);
+    if (cm->allow_screen_content_tools) pmi_uv[uv_tx] = *pmi;
+    ext_intra_mode_info_uv[uv_tx] = mbmi->ext_intra_mode_info;
+    uv_angle_delta[uv_tx] = mbmi->angle_delta[1];
+  }
+
+  rate_uv = rate_uv_tokenonly[uv_tx];
+  distortion_uv = dist_uv[uv_tx];
+  skippable = skippable && skip_uv[uv_tx];
+  mbmi->uv_mode = mode_uv[uv_tx];
+  if (cm->allow_screen_content_tools) {
+    pmi->palette_size[1] = pmi_uv[uv_tx].palette_size[1];
+    memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
+           pmi_uv[uv_tx].palette_colors + PALETTE_MAX_SIZE,
+           2 * PALETTE_MAX_SIZE * sizeof(pmi->palette_colors[0]));
+  }
+  mbmi->angle_delta[1] = uv_angle_delta[uv_tx];
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
+      ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1];
+  if (ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1]) {
+    mbmi->ext_intra_mode_info.ext_intra_mode[1] =
+        ext_intra_mode_info_uv[uv_tx].ext_intra_mode[1];
+  }
+
+  rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
+          cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
+  if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
+    rate2 += vp10_cost_bit(
+        vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx], 0);
+
+  if (!xd->lossless[mbmi->segment_id]) {
+    // super_block_yrd above includes the cost of the tx_size in the
+    // tokenonly rate, but for intra blocks, tx_size is always coded
+    // (prediction granularity), so we account for it in the full rate,
+    // not the tokenonly rate.
+    rate_y -= cpi->tx_size_cost[max_tx_size -
+                                TX_8X8][get_tx_size_context(xd)][mbmi->tx_size];
+  }
+
+  rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
+                         mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+  rate2 += write_uniform_cost(FILTER_INTRA_MODES,
+                              mbmi->ext_intra_mode_info.ext_intra_mode[0]);
+  if (mbmi->uv_mode != DC_PRED && mbmi->uv_mode != TM_PRED) {
+    rate2 += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
+                                MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
+  }
+  if (mbmi->mode == DC_PRED) {
+    rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
+                           mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+    if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
+      rate2 += write_uniform_cost(FILTER_INTRA_MODES,
+                                  mbmi->ext_intra_mode_info.ext_intra_mode[1]);
+  }
+  distortion2 = distortion_y + distortion_uv;
+  vp10_encode_intra_block_plane(x, bsize, 0, 0);
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    x->recon_variance = vp10_high_get_sby_perpixel_variance(
+        cpi, &xd->plane[0].dst, bsize, xd->bd);
+  } else {
+    x->recon_variance =
+        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+  }
+#else
+  x->recon_variance =
+      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  rate2 += ref_costs_single[INTRA_FRAME];
+
+  if (skippable) {
+    rate2 -= (rate_y + rate_uv);
+    rate_y = 0;
+    rate_uv = 0;
+    rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+  } else {
+    rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+  }
+  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+  rd_variance_adjustment(x, &this_rd, INTRA_FRAME, x->source_variance);
+
+  if (this_rd < *best_intra_rd) {
+    *best_intra_rd = this_rd;
+    *best_intra_mode = mbmi->mode;
+  }
+  for (i = 0; i < REFERENCE_MODES; ++i)
+    best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+
+  if (this_rd < *best_rd) {
+    *best_mode_index = dc_mode_index;
+    mbmi->mv[0].as_int = 0;
+    rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+    if (x->skip)
+      *returnrate_nocoef = rate2;
+    else
+      *returnrate_nocoef = rate2 - rate_y - rate_uv;
+    *returnrate_nocoef -= vp10_cost_bit(vp10_get_skip_prob(cm, xd), skippable);
+    *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
+                                        mbmi->ref_frame[0] != INTRA_FRAME);
+#endif  // CONFIG_SUPERTX
+    rd_cost->dist = distortion2;
+    rd_cost->rdcost = this_rd;
+    *best_rd = this_rd;
+    *best_mbmode = *mbmi;
+    *best_skip2 = 0;
+    *best_mode_skippable = skippable;
+  }
+}
+#endif  // CONFIG_EXT_INTRA
+
+#if CONFIG_OBMC
+static void calc_target_weighted_pred(
+    const VP10_COMMON *cm, const MACROBLOCK *x, const MACROBLOCKD *xd,
+    int mi_row, int mi_col, const uint8_t *above, int above_stride,
+    const uint8_t *left, int left_stride, int32_t *mask_buf, int32_t *wsrc_buf);
+#endif  // CONFIG_OBMC
+
+void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
+                                MACROBLOCK *x, int mi_row, int mi_col,
+                                RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                                int *returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                int64_t best_rd_so_far) {
+  VP10_COMMON *const cm = &cpi->common;
+  RD_OPT *const rd_opt = &cpi->rd;
+  SPEED_FEATURES *const sf = &cpi->sf;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
+  const struct segmentation *const seg = &cm->seg;
+  PREDICTION_MODE this_mode;
+  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
+  unsigned char segment_id = mbmi->segment_id;
+  int comp_pred, i, k;
+  int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+  struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE];
+#if CONFIG_EXT_INTER
+  int_mv single_newmvs[2][TOTAL_REFS_PER_FRAME] = { { { 0 } }, { { 0 } } };
+  int single_newmvs_rate[2][TOTAL_REFS_PER_FRAME] = { { 0 }, { 0 } };
+  int64_t modelled_rd[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+#else
+  int_mv single_newmv[TOTAL_REFS_PER_FRAME] = { { 0 } };
+#endif  // CONFIG_EXT_INTER
+  INTERP_FILTER single_inter_filter[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+  int single_skippable[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+  static const int flag_list[TOTAL_REFS_PER_FRAME] = {
+    0,
+    VPX_LAST_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_LAST2_FLAG,
+    VPX_LAST3_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_GOLD_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_BWD_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_ALT_FLAG
+  };
+  int64_t best_rd = best_rd_so_far;
+  int best_rate_y = INT_MAX, best_rate_uv = INT_MAX;
+  int64_t best_pred_diff[REFERENCE_MODES];
+  int64_t best_pred_rd[REFERENCE_MODES];
+  MB_MODE_INFO best_mbmode;
+  int best_mode_skippable = 0;
+  int midx, best_mode_index = -1;
+  unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
+  unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
+  vpx_prob comp_mode_p;
+  int64_t best_intra_rd = INT64_MAX;
+  unsigned int best_pred_sse = UINT_MAX;
+  PREDICTION_MODE best_intra_mode = DC_PRED;
+  int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
+  int64_t dist_uv[TX_SIZES];
+  int skip_uv[TX_SIZES];
+  PREDICTION_MODE mode_uv[TX_SIZES];
+  PALETTE_MODE_INFO pmi_uv[TX_SIZES];
+#if CONFIG_EXT_INTRA
+  EXT_INTRA_MODE_INFO ext_intra_mode_info_uv[TX_SIZES];
+  int8_t uv_angle_delta[TX_SIZES], dc_skipped = 1;
+  int is_directional_mode, angle_stats_ready = 0;
+  int rate_overhead, rate_dummy;
+  uint8_t directional_mode_skip_mask[INTRA_MODES];
+#endif  // CONFIG_EXT_INTRA
+  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+  const int *const intra_mode_cost = cpi->mbmode_cost[size_group_lookup[bsize]];
+  int best_skip2 = 0;
+  uint8_t ref_frame_skip_mask[2] = { 0 };
+#if CONFIG_EXT_INTER
+  uint32_t mode_skip_mask[TOTAL_REFS_PER_FRAME] = { 0 };
+  MV_REFERENCE_FRAME best_single_inter_ref = LAST_FRAME;
+  int64_t best_single_inter_rd = INT64_MAX;
+#else
+  uint16_t mode_skip_mask[TOTAL_REFS_PER_FRAME] = { 0 };
+#endif  // CONFIG_EXT_INTER
+  int mode_skip_start = sf->mode_skip_start + 1;
+  const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
+  const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
+  int64_t mode_threshold[MAX_MODES];
+  int *mode_map = tile_data->mode_map[bsize];
+  const int mode_search_skip_flags = sf->mode_search_skip_flags;
+  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
+  int palette_ctx = 0;
+  const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+  const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+  const MODE_INFO *above_mi = xd->above_mi;
+  const MODE_INFO *left_mi = xd->left_mi;
+#if CONFIG_OBMC
+#if CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
+#else
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, int32_t, weighted_src_buf[MAX_SB_SQUARE]);
+  DECLARE_ALIGNED(16, int32_t, mask2d_buf[MAX_SB_SQUARE]);
+  uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
+  int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+  int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+  int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+  int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+  int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+  int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+
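+  // Point dst_buf1/dst_buf2 at the per-plane regions of the temporary
+  // buffers (converted to 16-bit pointers for high bit depth).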
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    int len = sizeof(uint16_t);
+    dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
+    dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * len);
+    dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + 2 * MAX_SB_SQUARE * len);
+    dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
+    dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
+    dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_SB_SQUARE * len);
+  } else {
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    dst_buf1[0] = tmp_buf1;
+    dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
+    dst_buf1[2] = tmp_buf1 + 2 * MAX_SB_SQUARE;
+    dst_buf2[0] = tmp_buf2;
+    dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
+    dst_buf2[2] = tmp_buf2 + 2 * MAX_SB_SQUARE;
+#if CONFIG_VP9_HIGHBITDEPTH
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_OBMC
+
+  vp10_zero(best_mbmode);
+  vp10_zero(pmi_uv);
+
+  if (cm->allow_screen_content_tools) {
+    if (above_mi)
+      palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+    if (left_mi)
+      palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+  }
+
+#if CONFIG_EXT_INTRA
+  memset(directional_mode_skip_mask, 0,
+         sizeof(directional_mode_skip_mask[0]) * INTRA_MODES);
+#endif  // CONFIG_EXT_INTRA
+
+  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
+                           &comp_mode_p);
+
+  for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
+  for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
+  for (i = 0; i < TOTAL_REFS_PER_FRAME; ++i) x->pred_sse[i] = INT_MAX;
+  for (i = 0; i < MB_MODE_COUNT; ++i) {
+    for (k = 0; k < TOTAL_REFS_PER_FRAME; ++k) {
+      single_inter_filter[i][k] = SWITCHABLE;
+      single_skippable[i][k] = 0;
+    }
+  }
+
+  rd_cost->rate = INT_MAX;
+#if CONFIG_SUPERTX
+  *returnrate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    x->pred_mv_sad[ref_frame] = INT_MAX;
+    x->mbmi_ext->mode_context[ref_frame] = 0;
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    x->mbmi_ext->compound_mode_context[ref_frame] = 0;
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+      assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
+      setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
+                         frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
+    }
+    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
+    frame_mv[ZEROMV][ref_frame].as_int = 0;
+#if CONFIG_EXT_INTER
+    frame_mv[NEWFROMNEARMV][ref_frame].as_int = INVALID_MV;
+    frame_mv[NEW_NEWMV][ref_frame].as_int = INVALID_MV;
+    frame_mv[ZERO_ZEROMV][ref_frame].as_int = 0;
+#endif  // CONFIG_EXT_INTER
+  }
+
+#if CONFIG_REF_MV
+  for (; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
+    MODE_INFO *const mi = xd->mi[0];
+    int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
+    x->mbmi_ext->mode_context[ref_frame] = 0;
+    vp10_find_mv_refs(cm, xd, mi, ref_frame, &mbmi_ext->ref_mv_count[ref_frame],
+                      mbmi_ext->ref_mv_stack[ref_frame],
+#if CONFIG_EXT_INTER
+                      mbmi_ext->compound_mode_context,
+#endif  // CONFIG_EXT_INTER
+                      candidates, mi_row, mi_col, NULL, NULL,
+                      mbmi_ext->mode_context);
+  }
+#endif  // CONFIG_REF_MV
+
+#if CONFIG_OBMC
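+  // Precompute the overlapped predictors from the above and left neighbors
+  // once per block; every inter mode evaluated below reuses these buffers.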
+  vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+                                       dst_width1, dst_height1, dst_stride1);
+  vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+                                      dst_width2, dst_height2, dst_stride2);
+  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  calc_target_weighted_pred(cm, x, xd, mi_row, mi_col, dst_buf1[0],
+                            dst_stride1[0], dst_buf2[0], dst_stride2[0],
+                            mask2d_buf, weighted_src_buf);
+#endif  // CONFIG_OBMC
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+    if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
+      // Skip checking missing references in both single and compound
+      // reference modes. Note that a mode will be skipped iff both reference
+      // frames are masked out.
+#if CONFIG_EXT_REFS
+      if (ref_frame == BWDREF_FRAME || ref_frame == ALTREF_FRAME) {
+        ref_frame_skip_mask[0] |= (1 << ref_frame);
+        ref_frame_skip_mask[1] |= ((1 << ref_frame) | 0x01);
+      } else {
+#endif  // CONFIG_EXT_REFS
+        ref_frame_skip_mask[0] |= (1 << ref_frame);
+        ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+#if CONFIG_EXT_REFS
+      }
+#endif  // CONFIG_EXT_REFS
+    } else {
+      for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
+        // Skip fixed mv modes for poor references
+        if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
+          mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
+          break;
+        }
+      }
+    }
+    // If the segment-level reference frame feature is enabled, mask out any
+    // reference frame other than the one the segment allows.
+    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
+        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
+      ref_frame_skip_mask[0] |= (1 << ref_frame);
+      ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+    }
+  }
+
+  // Disable this drop out case if the ref frame
+  // segment level feature is enabled for this segment. This is to
+  // prevent the possibility that we end up unable to pick any mode.
+  if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
+    // Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
+    // unless ARNR filtering is enabled in which case we want
+    // an unfiltered alternative. We allow near/nearest as well
+    // because they may result in zero-zero MVs but be cheaper.
+    if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+      ref_frame_skip_mask[0] = (1 << LAST_FRAME) |
+#if CONFIG_EXT_REFS
+                               (1 << LAST2_FRAME) | (1 << LAST3_FRAME) |
+                               (1 << BWDREF_FRAME) |
+#endif  // CONFIG_EXT_REFS
+                               (1 << GOLDEN_FRAME);
+      ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
+      // TODO(zoeliu): Further explore whether the following needs to be done
+      //               for BWDREF_FRAME as well.
+      mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
+      if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
+      if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
+#if CONFIG_EXT_INTER
+      if (frame_mv[NEAREST_NEARESTMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEAREST_NEARESTMV);
+      if (frame_mv[NEAREST_NEARMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEAREST_NEARMV);
+      if (frame_mv[NEAR_NEARESTMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEAR_NEARESTMV);
+      if (frame_mv[NEAR_NEARMV][ALTREF_FRAME].as_int != 0)
+        mode_skip_mask[ALTREF_FRAME] |= (1 << NEAR_NEARMV);
+#endif  // CONFIG_EXT_INTER
+    }
+  }
+
+  if (cpi->rc.is_src_frame_alt_ref) {
+    if (sf->alt_ref_search_fp) {
+      assert(cpi->ref_frame_flags & flag_list[ALTREF_FRAME]);
+      mode_skip_mask[ALTREF_FRAME] = 0;
+      ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
+      ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
+    }
+  }
+
+  if (sf->alt_ref_search_fp)
+    if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
+      if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
+        mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
+
+  if (sf->adaptive_mode_search) {
+    if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
+        cpi->rc.frames_since_golden >= 3)
+      if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
+        mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
+  }
+
+  if (bsize > sf->max_intra_bsize) {
+    ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
+    ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
+  }
+
+  mode_skip_mask[INTRA_FRAME] |=
+      ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
+
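+  // Scale each mode's base RD threshold by its adaptive frequency factor,
+  // which is maintained in units of 1/32 (hence the >> 5).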
+  for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
+  for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
+    mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
+
+  midx = sf->schedule_mode_search ? mode_skip_start : 0;
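+  // When mode-search scheduling is enabled, bubble-sort the head of mode_map
+  // into ascending threshold order so that cheaper modes are evaluated first.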
+  while (midx > 4) {
+    uint8_t end_pos = 0;
+    for (i = 5; i < midx; ++i) {
+      if (mode_threshold[mode_map[i - 1]] > mode_threshold[mode_map[i]]) {
+        uint8_t tmp = mode_map[i];
+        mode_map[i] = mode_map[i - 1];
+        mode_map[i - 1] = tmp;
+        end_pos = i;
+      }
+    }
+    midx = end_pos;
+  }
+
+  if (cpi->sf.tx_type_search.fast_intra_tx_type_search)
+    x->use_default_intra_tx_type = 1;
+  else
+    x->use_default_intra_tx_type = 0;
+
+  if (cpi->sf.tx_type_search.fast_inter_tx_type_search)
+    x->use_default_inter_tx_type = 1;
+  else
+    x->use_default_inter_tx_type = 0;
+
+#if CONFIG_EXT_INTER
+  for (i = 0; i < MB_MODE_COUNT; ++i)
+    for (ref_frame = 0; ref_frame < TOTAL_REFS_PER_FRAME; ++ref_frame)
+      modelled_rd[i][ref_frame] = INT64_MAX;
+#endif  // CONFIG_EXT_INTER
+
+  for (midx = 0; midx < MAX_MODES; ++midx) {
+    int mode_index;
+    int mode_excluded = 0;
+    int64_t this_rd = INT64_MAX;
+    int disable_skip = 0;
+    int compmode_cost = 0;
+#if CONFIG_EXT_INTER
+    int compmode_interintra_cost = 0;
+    int compmode_wedge_cost = 0;
+#endif  // CONFIG_EXT_INTER
+    int rate2 = 0, rate_y = 0, rate_uv = 0;
+    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
+    int skippable = 0;
+    int this_skip2 = 0;
+    int64_t total_sse = INT64_MAX;
+    int early_term = 0;
+#if CONFIG_REF_MV
+    uint8_t ref_frame_type;
+#endif
+
+    mode_index = mode_map[midx];
+    this_mode = vp10_mode_order[mode_index].mode;
+    ref_frame = vp10_mode_order[mode_index].ref_frame[0];
+    second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
+
+#if CONFIG_EXT_INTER
+    if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
+      // Mode must be compatible.
+      assert(is_interintra_allowed_mode(this_mode));
+
+      if (!is_interintra_allowed_bsize(bsize)) continue;
+    }
+
+    if (is_inter_compound_mode(this_mode)) {
+      frame_mv[this_mode][ref_frame].as_int =
+          frame_mv[compound_ref0_mode(this_mode)][ref_frame].as_int;
+      frame_mv[this_mode][second_ref_frame].as_int =
+          frame_mv[compound_ref1_mode(this_mode)][second_ref_frame].as_int;
+    }
+#endif  // CONFIG_EXT_INTER
+
+    // Look at the reference frame of the best mode so far and set the
+    // skip mask to look at a subset of the remaining modes.
+    if (midx == mode_skip_start && best_mode_index >= 0) {
+      switch (best_mbmode.ref_frame[0]) {
+        case INTRA_FRAME: break;
+        case LAST_FRAME:
+          ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+          break;
+#if CONFIG_EXT_REFS
+        case LAST2_FRAME:
+          ref_frame_skip_mask[0] |= LAST2_FRAME_MODE_MASK;
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+          break;
+        case LAST3_FRAME:
+          ref_frame_skip_mask[0] |= LAST3_FRAME_MODE_MASK;
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+          break;
+#endif  // CONFIG_EXT_REFS
+        case GOLDEN_FRAME:
+          ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+          break;
+#if CONFIG_EXT_REFS
+        case BWDREF_FRAME:
+          ref_frame_skip_mask[0] |= BWDREF_FRAME_MODE_MASK;
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+          break;
+#endif  // CONFIG_EXT_REFS
+        case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALTREF_FRAME_MODE_MASK;
+#if CONFIG_EXT_REFS
+          ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+#endif  // CONFIG_EXT_REFS
+          break;
+        case NONE:
+        case TOTAL_REFS_PER_FRAME:
+          assert(0 && "Invalid Reference frame");
+          break;
+      }
+    }
+
+    if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
+        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+      continue;
+
+    if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
+
+    // Test best rd so far against threshold for trying this mode.
+    if (best_mode_skippable && sf->schedule_mode_search)
+      mode_threshold[mode_index] <<= 1;
+
+    if (best_rd < mode_threshold[mode_index]) continue;
+
+    comp_pred = second_ref_frame > INTRA_FRAME;
+    if (comp_pred) {
+      if (!cpi->allow_comp_inter_inter) continue;
+
+      // Skip compound inter modes if ARF is not available.
+      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
+
+      // Do not allow compound prediction if the segment level reference frame
+      // feature is in use, since in that case only one reference is allowed.
+      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
+
+      if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
+          best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
+        continue;
+
+      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
+    } else {
+      if (ref_frame != INTRA_FRAME)
+        mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
+    }
+
+    if (ref_frame == INTRA_FRAME) {
+      if (sf->adaptive_mode_search)
+        if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
+          continue;
+
+      if (this_mode != DC_PRED) {
+        // Disable intra modes other than DC_PRED for blocks with low variance
+        // Threshold for intra skipping based on source variance
+        // TODO(debargha): Specialize the threshold for super block sizes
+        const unsigned int skip_intra_var_thresh = 64;
+        if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
+            x->source_variance < skip_intra_var_thresh)
+          continue;
+        // Only search the oblique modes if the best so far is
+        // one of the neighboring directional modes
+        if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
+            (this_mode >= D45_PRED && this_mode <= TM_PRED)) {
+          if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
+            continue;
+        }
+        if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
+          if (conditional_skipintra(this_mode, best_intra_mode)) continue;
+        }
+      }
+    } else {
+      const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
+      if (!check_best_zero_mv(cpi, mbmi_ext->mode_context,
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+                              mbmi_ext->compound_mode_context,
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+                              frame_mv, this_mode, ref_frames, bsize, -1))
+        continue;
+    }
+
+    mbmi->mode = this_mode;
+    mbmi->uv_mode = DC_PRED;
+    mbmi->ref_frame[0] = ref_frame;
+    mbmi->ref_frame[1] = second_ref_frame;
+    pmi->palette_size[0] = 0;
+    pmi->palette_size[1] = 0;
+#if CONFIG_EXT_INTRA
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+    // Evaluate all sub-pel filters irrespective of whether we can use them
+    // for this frame.
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i) {
+      mbmi->interp_filter[i] = cm->interp_filter == SWITCHABLE
+                                   ? EIGHTTAP_REGULAR
+                                   : cm->interp_filter;
+    }
+#else
+    mbmi->interp_filter =
+        cm->interp_filter == SWITCHABLE ? EIGHTTAP_REGULAR : cm->interp_filter;
+#endif
+    mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
+    mbmi->motion_variation = SIMPLE_TRANSLATION;
+
+    x->skip = 0;
+    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
+
+    // Select prediction reference frames.
+    for (i = 0; i < MAX_MB_PLANE; i++) {
+      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
+      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+    }
+
+#if CONFIG_EXT_INTER
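+    // (DC_PRED - 1) acts as an "interintra mode not set" sentinel.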
+    mbmi->interintra_mode = (PREDICTION_MODE)(DC_PRED - 1);
+#endif  // CONFIG_EXT_INTER
+
+    if (ref_frame == INTRA_FRAME) {
+      TX_SIZE uv_tx;
+      struct macroblockd_plane *const pd = &xd->plane[1];
+#if CONFIG_EXT_INTRA
+      is_directional_mode = (mbmi->mode != DC_PRED && mbmi->mode != TM_PRED);
+      if (is_directional_mode) {
+        if (!angle_stats_ready) {
+          const int src_stride = x->plane[0].src.stride;
+          const uint8_t *src = x->plane[0].src.buf;
+          const int rows = 4 * num_4x4_blocks_high_lookup[bsize];
+          const int cols = 4 * num_4x4_blocks_wide_lookup[bsize];
+#if CONFIG_VP9_HIGHBITDEPTH
+          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+            highbd_angle_estimation(src, src_stride, rows, cols,
+                                    directional_mode_skip_mask);
+          else
+#endif
+            angle_estimation(src, src_stride, rows, cols,
+                             directional_mode_skip_mask);
+          angle_stats_ready = 1;
+        }
+        if (directional_mode_skip_mask[mbmi->mode]) continue;
+        rate_overhead = write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1, 0) +
+                        intra_mode_cost[mbmi->mode];
+        rate_y = INT_MAX;
+        this_rd =
+            rd_pick_intra_angle_sby(cpi, x, &rate_dummy, &rate_y, &distortion_y,
+                                    &skippable, bsize, rate_overhead, best_rd);
+      } else {
+        mbmi->angle_delta[0] = 0;
+        super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+                        best_rd);
+      }
+#else
+      super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+                      best_rd);
+#endif  // CONFIG_EXT_INTRA
+
+      if (rate_y == INT_MAX) continue;
+
+#if CONFIG_EXT_INTRA
+      if (mbmi->mode == DC_PRED) dc_skipped = 0;
+#endif  // CONFIG_EXT_INTRA
+
+      uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
+                                  pd->subsampling_y);
+      if (rate_uv_intra[uv_tx] == INT_MAX) {
+        choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+                             &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+                             &skip_uv[uv_tx], &mode_uv[uv_tx]);
+        if (cm->allow_screen_content_tools) pmi_uv[uv_tx] = *pmi;
+#if CONFIG_EXT_INTRA
+        ext_intra_mode_info_uv[uv_tx] = mbmi->ext_intra_mode_info;
+        uv_angle_delta[uv_tx] = mbmi->angle_delta[1];
+#endif  // CONFIG_EXT_INTRA
+      }
+
+      rate_uv = rate_uv_tokenonly[uv_tx];
+      distortion_uv = dist_uv[uv_tx];
+      skippable = skippable && skip_uv[uv_tx];
+      mbmi->uv_mode = mode_uv[uv_tx];
+      if (cm->allow_screen_content_tools) {
+        pmi->palette_size[1] = pmi_uv[uv_tx].palette_size[1];
+        memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
+               pmi_uv[uv_tx].palette_colors + PALETTE_MAX_SIZE,
+               2 * PALETTE_MAX_SIZE * sizeof(pmi->palette_colors[0]));
+      }
+#if CONFIG_EXT_INTRA
+      mbmi->angle_delta[1] = uv_angle_delta[uv_tx];
+      mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
+          ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1];
+      if (ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1]) {
+        mbmi->ext_intra_mode_info.ext_intra_mode[1] =
+            ext_intra_mode_info_uv[uv_tx].ext_intra_mode[1];
+      }
+#endif  // CONFIG_EXT_INTRA
+
+      rate2 = rate_y + intra_mode_cost[mbmi->mode] + rate_uv +
+              cpi->intra_uv_mode_cost[mbmi->mode][mbmi->uv_mode];
+      if (cpi->common.allow_screen_content_tools && mbmi->mode == DC_PRED)
+        rate2 += vp10_cost_bit(
+            vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx],
+            0);
+
+      if (!xd->lossless[mbmi->segment_id]) {
+        // super_block_yrd above includes the cost of the tx_size in the
+        // tokenonly rate, but for intra blocks, tx_size is always coded
+        // (prediction granularity), so we account for it in the full rate,
+        // not the tokenonly rate.
+        rate_y -= cpi->tx_size_cost[max_tx_size - TX_8X8][get_tx_size_context(
+            xd)][mbmi->tx_size];
+      }
+#if CONFIG_EXT_INTRA
+      if (is_directional_mode) {
+        int p_angle;
+        const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+        rate2 += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
+                                    MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
+        p_angle =
+            mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
+        if (vp10_is_intra_filter_switchable(p_angle))
+          rate2 += cpi->intra_filter_cost[intra_filter_ctx][mbmi->intra_filter];
+      }
+
+      if (mbmi->mode == DC_PRED && ALLOW_FILTER_INTRA_MODES) {
+        rate2 += vp10_cost_bit(cm->fc->ext_intra_probs[0],
+                               mbmi->ext_intra_mode_info.use_ext_intra_mode[0]);
+        if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
+          rate2 += write_uniform_cost(
+              FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[0]);
+        }
+      }
+
+      if (mbmi->uv_mode != DC_PRED && mbmi->uv_mode != TM_PRED) {
+        rate2 += write_uniform_cost(2 * MAX_ANGLE_DELTAS + 1,
+                                    MAX_ANGLE_DELTAS + mbmi->angle_delta[1]);
+      }
+
+      if (ALLOW_FILTER_INTRA_MODES && mbmi->mode == DC_PRED) {
+        rate2 += vp10_cost_bit(cpi->common.fc->ext_intra_probs[1],
+                               mbmi->ext_intra_mode_info.use_ext_intra_mode[1]);
+        if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1])
+          rate2 += write_uniform_cost(
+              FILTER_INTRA_MODES, mbmi->ext_intra_mode_info.ext_intra_mode[1]);
+      }
+#endif  // CONFIG_EXT_INTRA
+      if (this_mode != DC_PRED && this_mode != TM_PRED)
+        rate2 += intra_cost_penalty;
+      distortion2 = distortion_y + distortion_uv;
+      vp10_encode_intra_block_plane(x, bsize, 0, 1);
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        x->recon_variance = vp10_high_get_sby_perpixel_variance(
+            cpi, &xd->plane[0].dst, bsize, xd->bd);
+      } else {
+        x->recon_variance =
+            vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+      }
+#else
+      x->recon_variance =
+          vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+    } else {
+#if CONFIG_REF_MV
+      int_mv backup_ref_mv[2];
+
+      backup_ref_mv[0] = mbmi_ext->ref_mvs[ref_frame][0];
+      if (comp_pred) backup_ref_mv[1] = mbmi_ext->ref_mvs[second_ref_frame][0];
+#endif
+#if CONFIG_EXT_INTER
+      if (second_ref_frame == INTRA_FRAME) {
+        if (best_single_inter_ref != ref_frame) continue;
+        mbmi->interintra_mode = best_intra_mode;
+#if CONFIG_EXT_INTRA
+        // TODO(debargha|geza.lore):
+        // Should we use ext_intra modes for interintra?
+        mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+        mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+        mbmi->angle_delta[0] = 0;
+        mbmi->angle_delta[1] = 0;
+        mbmi->intra_filter = INTRA_FILTER_LINEAR;
+#endif  // CONFIG_EXT_INTRA
+      }
+#endif  // CONFIG_EXT_INTER
+#if CONFIG_REF_MV
+      mbmi->ref_mv_idx = 0;
+      ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+
+      if (this_mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
+        int ref;
+        for (ref = 0; ref < 1 + comp_pred; ++ref) {
+          int_mv this_mv =
+              (ref == 0) ? mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv
+                         : mbmi_ext->ref_mv_stack[ref_frame_type][0].comp_mv;
+          clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+          mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
+        }
+      }
+#endif
+      this_rd = handle_inter_mode(
+          cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
+          &disable_skip, frame_mv, mi_row, mi_col,
+#if CONFIG_OBMC
+          dst_buf1, dst_stride1, dst_buf2, dst_stride2, weighted_src_buf,
+          mask2d_buf,
+#endif  // CONFIG_OBMC
+#if CONFIG_EXT_INTER
+          single_newmvs, single_newmvs_rate, &compmode_interintra_cost,
+          &compmode_wedge_cost, modelled_rd,
+#else
+          single_newmv,
+#endif  // CONFIG_EXT_INTER
+          single_inter_filter, single_skippable, &total_sse, best_rd);
+
+#if CONFIG_REF_MV
+      // TODO(jingning): This needs some refactoring to improve code quality
+      // and reduce redundant steps.
+      if ((mbmi->mode == NEARMV &&
+           mbmi_ext->ref_mv_count[ref_frame_type] > 2) ||
+          (mbmi->mode == NEWMV && mbmi_ext->ref_mv_count[ref_frame_type] > 1)) {
+        int_mv backup_mv = frame_mv[NEARMV][ref_frame];
+        MB_MODE_INFO backup_mbmi = *mbmi;
+        int backup_skip = x->skip;
+        int64_t tmp_ref_rd = this_rd;
+        int ref_idx;
+
+        // TODO(jingning): This should be deprecated shortly.
+        int idx_offset = (mbmi->mode == NEARMV) ? 1 : 0;
+        int ref_set =
+            VPXMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
+
+        uint8_t drl_ctx =
+            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx_offset);
+        // Back up frame_mv[NEWMV]; it is overwritten during the DRL search
+        // below and restored afterwards.
+        int_mv backup_fmv[2];
+        backup_fmv[0] = frame_mv[NEWMV][ref_frame];
+        if (comp_pred) backup_fmv[1] = frame_mv[NEWMV][second_ref_frame];
+
+        rate2 += cpi->drl_mode_cost0[drl_ctx][0];
+
+        if (this_rd < INT64_MAX) {
+          if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
+              RDCOST(x->rdmult, x->rddiv, 0, total_sse))
+            tmp_ref_rd =
+                RDCOST(x->rdmult, x->rddiv,
+                       rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+                       distortion2);
+          else
+            tmp_ref_rd =
+                RDCOST(x->rdmult, x->rddiv,
+                       rate2 + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
+                           rate_y - rate_uv,
+                       total_sse);
+        }
+#if CONFIG_VAR_TX
+        for (i = 0; i < MAX_MB_PLANE; ++i)
+          memcpy(x->blk_skip_drl[i], x->blk_skip[i],
+                 sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+
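+        // Re-evaluate the mode with each additional reference MV from the
+        // DRL stack and keep the candidate that yields the lowest RD cost.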
+        for (ref_idx = 0; ref_idx < ref_set; ++ref_idx) {
+          int64_t tmp_alt_rd = INT64_MAX;
+          int tmp_rate = 0, tmp_rate_y = 0, tmp_rate_uv = 0;
+          int tmp_skip = 1;
+          int64_t tmp_dist = 0, tmp_sse = 0;
+          int dummy_disable_skip = 0;
+          int ref;
+          int_mv cur_mv;
+
+          mbmi->ref_mv_idx = 1 + ref_idx;
+
+          for (ref = 0; ref < 1 + comp_pred; ++ref) {
+            int_mv this_mv =
+                (ref == 0)
+                    ? mbmi_ext->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
+                          .this_mv
+                    : mbmi_ext->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx]
+                          .comp_mv;
+            clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+            mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
+          }
+
+          cur_mv = mbmi_ext->ref_mv_stack[ref_frame_type]
+                       [mbmi->ref_mv_idx + idx_offset].this_mv;
+          clamp_mv2(&cur_mv.as_mv, xd);
+
+          if (!mv_check_bounds(x, &cur_mv.as_mv)) {
+            INTERP_FILTER
+            dummy_single_inter_filter[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME] = {
+              { 0 }
+            };
+            int dummy_single_skippable[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME] = {
+              { 0 }
+            };
+#if CONFIG_EXT_INTER
+            int_mv dummy_single_newmvs[2][TOTAL_REFS_PER_FRAME] = { { { 0 } },
+                                                                    { { 0 } } };
+            int dummy_single_newmvs_rate[2][TOTAL_REFS_PER_FRAME] = { { 0 },
+                                                                      { 0 } };
+            int dummy_compmode_interintra_cost = 0;
+            int dummy_compmode_wedge_cost = 0;
+#else
+            int_mv dummy_single_newmv[TOTAL_REFS_PER_FRAME] = { { 0 } };
+#endif
+
+            frame_mv[NEARMV][ref_frame] = cur_mv;
+            tmp_alt_rd = handle_inter_mode(
+                cpi, x, bsize, &tmp_rate, &tmp_dist, &tmp_skip, &tmp_rate_y,
+                &tmp_rate_uv, &dummy_disable_skip, frame_mv, mi_row, mi_col,
+#if CONFIG_OBMC
+                dst_buf1, dst_stride1, dst_buf2, dst_stride2, weighted_src_buf,
+                mask2d_buf,
+#endif  // CONFIG_OBMC
+#if CONFIG_EXT_INTER
+                dummy_single_newmvs, dummy_single_newmvs_rate,
+                &dummy_compmode_interintra_cost, &dummy_compmode_wedge_cost,
+                NULL,
+#else
+                dummy_single_newmv,
+#endif
+                dummy_single_inter_filter, dummy_single_skippable, &tmp_sse,
+                best_rd);
+          }
+
+          for (i = 0; i < mbmi->ref_mv_idx; ++i) {
+            uint8_t drl1_ctx = 0;
+            drl1_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+                                    i + idx_offset);
+            tmp_rate += cpi->drl_mode_cost0[drl1_ctx][1];
+          }
+
+          if (mbmi_ext->ref_mv_count[ref_frame_type] >
+                  mbmi->ref_mv_idx + idx_offset + 1 &&
+              ref_idx < ref_set - 1) {
+            uint8_t drl1_ctx =
+                vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type],
+                             mbmi->ref_mv_idx + idx_offset);
+            tmp_rate += cpi->drl_mode_cost0[drl1_ctx][0];
+          }
+
+          if (tmp_alt_rd < INT64_MAX) {
+#if CONFIG_OBMC
+            tmp_alt_rd = RDCOST(x->rdmult, x->rddiv, tmp_rate, tmp_dist);
+#else
+            if (RDCOST(x->rdmult, x->rddiv, tmp_rate_y + tmp_rate_uv,
+                       tmp_dist) < RDCOST(x->rdmult, x->rddiv, 0, tmp_sse))
+              tmp_alt_rd = RDCOST(
+                  x->rdmult, x->rddiv,
+                  tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0),
+                  tmp_dist);
+            else
+              tmp_alt_rd = RDCOST(
+                  x->rdmult, x->rddiv,
+                  tmp_rate + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1) -
+                      tmp_rate_y - tmp_rate_uv,
+                  tmp_sse);
+#endif  // CONFIG_OBMC
+          }
+
+          if (tmp_ref_rd > tmp_alt_rd) {
+            rate2 = tmp_rate;
+            disable_skip = dummy_disable_skip;
+            distortion2 = tmp_dist;
+            skippable = tmp_skip;
+            rate_y = tmp_rate_y;
+            rate_uv = tmp_rate_uv;
+            total_sse = tmp_sse;
+            this_rd = tmp_alt_rd;
+            tmp_ref_rd = tmp_alt_rd;
+            backup_mbmi = *mbmi;
+            backup_skip = x->skip;
+#if CONFIG_VAR_TX
+            for (i = 0; i < MAX_MB_PLANE; ++i)
+              memcpy(x->blk_skip_drl[i], x->blk_skip[i],
+                     sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+          } else {
+            *mbmi = backup_mbmi;
+            x->skip = backup_skip;
+          }
+        }
+
+        frame_mv[NEARMV][ref_frame] = backup_mv;
+        frame_mv[NEWMV][ref_frame] = backup_fmv[0];
+        if (comp_pred) frame_mv[NEWMV][second_ref_frame] = backup_fmv[1];
+#if CONFIG_VAR_TX
+        for (i = 0; i < MAX_MB_PLANE; ++i)
+          memcpy(x->blk_skip[i], x->blk_skip_drl[i],
+                 sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+      }
+      mbmi_ext->ref_mvs[ref_frame][0] = backup_ref_mv[0];
+      if (comp_pred) mbmi_ext->ref_mvs[second_ref_frame][0] = backup_ref_mv[1];
+#endif  // CONFIG_REF_MV
+
+      if (this_rd == INT64_MAX) continue;
+
+      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+
+      if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
+    }
+
+#if CONFIG_EXT_INTER
+    rate2 += compmode_interintra_cost;
+    if (cm->reference_mode != SINGLE_REFERENCE && comp_pred)
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+      if (mbmi->motion_variation == SIMPLE_TRANSLATION)
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+        rate2 += compmode_wedge_cost;
+#endif  // CONFIG_EXT_INTER
+
+    // Estimate the reference frame signaling cost and add it
+    // to the rolling cost variable.
+    if (comp_pred) {
+      rate2 += ref_costs_comp[ref_frame];
+#if CONFIG_EXT_REFS
+      rate2 += ref_costs_comp[second_ref_frame];
+#endif  // CONFIG_EXT_REFS
+    } else {
+      rate2 += ref_costs_single[ref_frame];
+    }
+
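+    // Under CONFIG_OBMC, handle_inter_mode() decides the skip state of inter
+    // blocks itself, so only intra blocks take the branch below.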
+#if CONFIG_OBMC
+    if (ref_frame == INTRA_FRAME) {
+#else
+    if (!disable_skip) {
+#endif  // CONFIG_OBMC
+      if (skippable) {
+        // Back out the coefficient coding costs
+        rate2 -= (rate_y + rate_uv);
+        rate_y = 0;
+        rate_uv = 0;
+        // Cost the skip mb case
+        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      } else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
+        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
+            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
+          // Add in the cost of the no skip flag.
+          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        } else {
+          // FIXME(rbultje) make this work for splitmv also
+          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          distortion2 = total_sse;
+          assert(total_sse >= 0);
+          rate2 -= (rate_y + rate_uv);
+          this_skip2 = 1;
+          rate_y = 0;
+          rate_uv = 0;
+        }
+      } else {
+        // Add in the cost of the no skip flag.
+        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+      }
+
+      // Calculate the final RD estimate for this mode.
+      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+#if CONFIG_OBMC
+    } else {
+      this_skip2 = mbmi->skip;
+      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+      if (this_skip2) {
+        rate_y = 0;
+        rate_uv = 0;
+      }
+#endif  // CONFIG_OBMC
+    }
+
+    // Apply an adjustment to the rd value based on the similarity of the
+    // source variance and reconstructed variance.
+    rd_variance_adjustment(x, &this_rd, ref_frame, x->source_variance);
+
+    if (ref_frame == INTRA_FRAME) {
+      // Keep record of best intra rd
+      if (this_rd < best_intra_rd) {
+        best_intra_rd = this_rd;
+        best_intra_mode = mbmi->mode;
+      }
+#if CONFIG_EXT_INTER
+    } else if (second_ref_frame == NONE) {
+      if (this_rd < best_single_inter_rd) {
+        best_single_inter_rd = this_rd;
+        best_single_inter_ref = mbmi->ref_frame[0];
+      }
+#endif  // CONFIG_EXT_INTER
+    }
+
+    if (!disable_skip && ref_frame == INTRA_FRAME) {
+      for (i = 0; i < REFERENCE_MODES; ++i)
+        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+    }
+
+    // Did this mode help, i.e. is it the new best mode so far?
+    if (this_rd < best_rd || x->skip) {
+      if (!mode_excluded) {
+        // Note index of best mode so far
+        best_mode_index = mode_index;
+
+        if (ref_frame == INTRA_FRAME) {
+          /* required for left and above block mv */
+          mbmi->mv[0].as_int = 0;
+        } else {
+          best_pred_sse = x->pred_sse[ref_frame];
+        }
+
+        rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+        if (x->skip)
+          *returnrate_nocoef = rate2;
+        else
+          *returnrate_nocoef = rate2 - rate_y - rate_uv;
+        *returnrate_nocoef -=
+            vp10_cost_bit(vp10_get_skip_prob(cm, xd),
+                          disable_skip || skippable || this_skip2);
+        *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
+                                            mbmi->ref_frame[0] != INTRA_FRAME);
+#if CONFIG_OBMC || CONFIG_WARPED_MOTION
+        if (is_inter_block(mbmi) && is_motvar_allowed(mbmi))
+          *returnrate_nocoef -= cpi->motvar_cost[bsize][mbmi->motion_variation];
+#endif  // CONFIG_OBMC || CONFIG_WARPED_MOTION
+#endif  // CONFIG_SUPERTX
+        rd_cost->dist = distortion2;
+        rd_cost->rdcost = this_rd;
+        best_rd = this_rd;
+        best_mbmode = *mbmi;
+        best_skip2 = this_skip2;
+        best_mode_skippable = skippable;
+        best_rate_y = rate_y + vp10_cost_bit(vp10_get_skip_prob(cm, xd),
+                                             this_skip2 || skippable);
+        best_rate_uv = rate_uv;
+
+#if CONFIG_VAR_TX
+        for (i = 0; i < MAX_MB_PLANE; ++i)
+          memcpy(ctx->blk_skip[i], x->blk_skip[i],
+                 sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+
+        // TODO(debargha): enhance this test with a better distortion prediction
+        // based on qp, activity mask and history
+        if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
+            (mode_index > MIN_EARLY_TERM_INDEX)) {
+          int qstep = xd->plane[0].dequant[1];
+          // TODO(debargha): Enhance this by specializing for each mode_index
+          int scale = 4;
+#if CONFIG_VP9_HIGHBITDEPTH
+          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+            qstep >>= (xd->bd - 8);
+          }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+          if (x->source_variance < UINT_MAX) {
+            const int var_adjust = (x->source_variance < 16);
+            scale -= var_adjust;
+          }
+          if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
+            early_term = 1;
+          }
+        }
+      }
+    }
+
+    /* keep record of best compound/single-only prediction */
+    if (!disable_skip && ref_frame != INTRA_FRAME) {
+      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
+
+      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+        single_rate = rate2 - compmode_cost;
+        hybrid_rate = rate2;
+      } else {
+        single_rate = rate2;
+        hybrid_rate = rate2 + compmode_cost;
+      }
+
+      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
+      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
+
+      if (!comp_pred) {
+        if (single_rd < best_pred_rd[SINGLE_REFERENCE])
+          best_pred_rd[SINGLE_REFERENCE] = single_rd;
+      } else {
+        if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
+          best_pred_rd[COMPOUND_REFERENCE] = single_rd;
+      }
+      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
+        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
+    }
+
+    if (early_term) break;
+
+    if (x->skip && !comp_pred) break;
+  }
+
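+  // If a fast transform-type search was used during the mode loop, redo the
+  // transform search for the winning mode with the full set of tx types.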
+  if (xd->lossless[mbmi->segment_id] == 0 && best_mode_index >= 0 &&
+      ((sf->tx_type_search.fast_inter_tx_type_search == 1 &&
+        is_inter_mode(best_mbmode.mode)) ||
+       (sf->tx_type_search.fast_intra_tx_type_search == 1 &&
+        !is_inter_mode(best_mbmode.mode)))) {
+    int rate_y = 0, rate_uv = 0;
+    int64_t dist_y = 0, dist_uv = 0;
+    int skip_y = 0, skip_uv = 0, skip_blk = 0;
+    int64_t sse_y = 0, sse_uv = 0;
+
+    x->use_default_inter_tx_type = 0;
+    x->use_default_intra_tx_type = 0;
+
+    *mbmi = best_mbmode;
+
+    set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
+
+    // Select prediction reference frames.
+    for (i = 0; i < MAX_MB_PLANE; i++) {
+      xd->plane[i].pre[0] = yv12_mb[mbmi->ref_frame[0]][i];
+      if (has_second_ref(mbmi))
+        xd->plane[i].pre[1] = yv12_mb[mbmi->ref_frame[1]][i];
+    }
+
+    if (is_inter_mode(mbmi->mode)) {
+      vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+#if CONFIG_OBMC
+      if (mbmi->motion_variation == OBMC_CAUSAL)
+        vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+                                         dst_stride1, dst_buf2, dst_stride2);
+#endif  // CONFIG_OBMC
+      vp10_subtract_plane(x, bsize, 0);
+#if CONFIG_VAR_TX
+      if (cm->tx_mode == TX_MODE_SELECT || xd->lossless[mbmi->segment_id]) {
+        select_tx_type_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
+                           INT64_MAX);
+      } else {
+        int idx, idy;
+        super_block_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
+                        INT64_MAX);
+        for (idy = 0; idy < xd->n8_h; ++idy)
+          for (idx = 0; idx < xd->n8_w; ++idx)
+            mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
+        memset(x->blk_skip[0], skip_y,
+               sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
+      }
+
+      inter_block_uvrd(cpi, x, &rate_uv, &dist_uv, &skip_uv, &sse_uv, bsize,
+                       INT64_MAX);
+#else
+      super_block_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
+                      INT64_MAX);
+      super_block_uvrd(cpi, x, &rate_uv, &dist_uv, &skip_uv, &sse_uv, bsize,
+                       INT64_MAX);
+#endif  // CONFIG_VAR_TX
+    } else {
+      super_block_yrd(cpi, x, &rate_y, &dist_y, &skip_y, &sse_y, bsize,
+                      INT64_MAX);
+      super_block_uvrd(cpi, x, &rate_uv, &dist_uv, &skip_uv, &sse_uv, bsize,
+                       INT64_MAX);
+    }
+
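+    // Decide whether signalling the block as skipped (no residual, so the
+    // distortion becomes the plain SSE) beats coding the coefficients.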
+    if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, (dist_y + dist_uv)) >
+        RDCOST(x->rdmult, x->rddiv, 0, (sse_y + sse_uv))) {
+      skip_blk = 1;
+      rate_y = vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+      rate_uv = 0;
+      dist_y = sse_y;
+      dist_uv = sse_uv;
+    } else {
+      skip_blk = 0;
+      rate_y += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+    }
+
+    if (RDCOST(x->rdmult, x->rddiv, best_rate_y + best_rate_uv, rd_cost->dist) >
+        RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, (dist_y + dist_uv))) {
+#if CONFIG_VAR_TX
+      int idx, idy;
+#endif
+      best_mbmode.tx_type = mbmi->tx_type;
+      best_mbmode.tx_size = mbmi->tx_size;
+#if CONFIG_VAR_TX
+      for (idy = 0; idy < xd->n8_h; ++idy)
+        for (idx = 0; idx < xd->n8_w; ++idx)
+          best_mbmode.inter_tx_size[idy][idx] = mbmi->inter_tx_size[idy][idx];
+
+      for (i = 0; i < MAX_MB_PLANE; ++i)
+        memcpy(ctx->blk_skip[i], x->blk_skip[i],
+               sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+      rd_cost->rate += (rate_y + rate_uv - best_rate_y - best_rate_uv);
+      rd_cost->dist = dist_y + dist_uv;
+      rd_cost->rdcost =
+          RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
+      best_skip2 = skip_blk;
+    }
+  }
+
+  // Only try palette mode when the best mode so far is an intra mode.
+  if (cm->allow_screen_content_tools && !is_inter_mode(best_mbmode.mode)) {
+    PREDICTION_MODE mode_selected;
+    int rate2 = 0, rate_y = 0;
+#if CONFIG_SUPERTX
+    int best_rate_nocoef;
+#endif
+    int64_t distortion2 = 0, distortion_y = 0, dummy_rd = best_rd, this_rd;
+    int skippable = 0, rate_overhead = 0;
+    TX_SIZE best_tx_size, uv_tx;
+    TX_TYPE best_tx_type;
+    PALETTE_MODE_INFO palette_mode_info;
+    uint8_t *const best_palette_color_map =
+        x->palette_buffer->best_palette_color_map;
+    uint8_t *const color_map = xd->plane[0].color_index_map;
+
+    mbmi->mode = DC_PRED;
+    mbmi->uv_mode = DC_PRED;
+    mbmi->ref_frame[0] = INTRA_FRAME;
+    mbmi->ref_frame[1] = NONE;
+    palette_mode_info.palette_size[0] = 0;
+    rate_overhead = rd_pick_palette_intra_sby(
+        cpi, x, bsize, palette_ctx, intra_mode_cost[DC_PRED],
+        &palette_mode_info, best_palette_color_map, &best_tx_size,
+        &best_tx_type, &mode_selected, &dummy_rd);
+    if (palette_mode_info.palette_size[0] == 0) goto PALETTE_EXIT;
+
+    pmi->palette_size[0] = palette_mode_info.palette_size[0];
+    if (palette_mode_info.palette_size[0] > 0) {
+      memcpy(pmi->palette_colors, palette_mode_info.palette_colors,
+             PALETTE_MAX_SIZE * sizeof(palette_mode_info.palette_colors[0]));
+      memcpy(color_map, best_palette_color_map,
+             rows * cols * sizeof(best_palette_color_map[0]));
+    }
+    super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+                    best_rd);
+    if (rate_y == INT_MAX) goto PALETTE_EXIT;
+    uv_tx =
+        get_uv_tx_size_impl(mbmi->tx_size, bsize, xd->plane[1].subsampling_x,
+                            xd->plane[1].subsampling_y);
+    if (rate_uv_intra[uv_tx] == INT_MAX) {
+      choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+                           &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+                           &skip_uv[uv_tx], &mode_uv[uv_tx]);
+      pmi_uv[uv_tx] = *pmi;
+#if CONFIG_EXT_INTRA
+      ext_intra_mode_info_uv[uv_tx] = mbmi->ext_intra_mode_info;
+      uv_angle_delta[uv_tx] = mbmi->angle_delta[1];
+#endif  // CONFIG_EXT_INTRA
+    }
+    mbmi->uv_mode = mode_uv[uv_tx];
+    pmi->palette_size[1] = pmi_uv[uv_tx].palette_size[1];
+    if (pmi->palette_size[1] > 0)
+      memcpy(pmi->palette_colors + PALETTE_MAX_SIZE,
+             pmi_uv[uv_tx].palette_colors + PALETTE_MAX_SIZE,
+             2 * PALETTE_MAX_SIZE * sizeof(pmi->palette_colors[0]));
+#if CONFIG_EXT_INTRA
+    mbmi->angle_delta[1] = uv_angle_delta[uv_tx];
+    mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
+        ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1];
+    if (ext_intra_mode_info_uv[uv_tx].use_ext_intra_mode[1]) {
+      mbmi->ext_intra_mode_info.ext_intra_mode[1] =
+          ext_intra_mode_info_uv[uv_tx].ext_intra_mode[1];
+    }
+#endif  // CONFIG_EXT_INTRA
+    skippable = skippable && skip_uv[uv_tx];
+    distortion2 = distortion_y + dist_uv[uv_tx];
+    rate2 = rate_y + rate_overhead + rate_uv_intra[uv_tx];
+    rate2 += ref_costs_single[INTRA_FRAME];
+
+    if (skippable) {
+      rate2 -= (rate_y + rate_uv_tokenonly[uv_tx]);
+#if CONFIG_SUPERTX
+      best_rate_nocoef = rate2;
+#endif
+      rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+    } else {
+#if CONFIG_SUPERTX
+      best_rate_nocoef = rate2 - (rate_y + rate_uv_tokenonly[uv_tx]);
+#endif
+      rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+    }
+    this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+    if (this_rd < best_rd) {
+      best_mode_index = 3;
+      mbmi->mv[0].as_int = 0;
+      rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+      *returnrate_nocoef = best_rate_nocoef;
+#endif
+      rd_cost->dist = distortion2;
+      rd_cost->rdcost = this_rd;
+      best_rd = this_rd;
+      best_mbmode = *mbmi;
+      best_skip2 = 0;
+      best_mode_skippable = skippable;
+    }
+  }
+PALETTE_EXIT:
+
+#if CONFIG_EXT_INTRA
+  // TODO(huisu): ext-intra is turned off in lossless mode for now to
+  // avoid a unit test failure
+  if (!xd->lossless[mbmi->segment_id] && ALLOW_FILTER_INTRA_MODES &&
+      mbmi->palette_mode_info.palette_size[0] == 0 && !dc_skipped &&
+      best_mode_index >= 0 && best_intra_rd < (best_rd + (best_rd >> 3))) {
+    pick_ext_intra_interframe(
+        cpi, x, ctx, bsize, rate_uv_intra, rate_uv_tokenonly, dist_uv, skip_uv,
+        mode_uv, ext_intra_mode_info_uv, pmi_uv, uv_angle_delta, palette_ctx, 0,
+        ref_costs_single, &best_rd, &best_intra_rd, &best_intra_mode,
+        &best_mode_index, &best_skip2, &best_mode_skippable,
+#if CONFIG_SUPERTX
+        returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+        best_pred_rd, &best_mbmode, rd_cost);
+  }
+#endif  // CONFIG_EXT_INTRA
+
+  // The inter modes' rate costs are not calculated precisely in some cases.
+  // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
+  // ZEROMV. Here, checks are added for those cases, and the mode decisions
+  // are corrected.
+  if (best_mbmode.mode == NEWMV
+#if CONFIG_EXT_INTER
+      || best_mbmode.mode == NEWFROMNEARMV || best_mbmode.mode == NEW_NEWMV
+#endif  // CONFIG_EXT_INTER
+      ) {
+    const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
+                                         best_mbmode.ref_frame[1] };
+    int comp_pred_mode = refs[1] > INTRA_FRAME;
+#if CONFIG_REF_MV
+    const uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
+    if (!comp_pred_mode) {
+      int i;
+      int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
+                        ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+                        : INT_MAX;
+
+      for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
+        int_mv cur_mv = mbmi_ext->ref_mv_stack[rf_type][i + 1].this_mv;
+        if (cur_mv.as_int == best_mbmode.mv[0].as_int) {
+          best_mbmode.mode = NEARMV;
+          best_mbmode.ref_mv_idx = i;
+        }
+      }
+
+      if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int)
+        best_mbmode.mode = NEARESTMV;
+      else if (best_mbmode.mv[0].as_int == 0)
+        best_mbmode.mode = ZEROMV;
+    } else {
+      int_mv nearestmv[2];
+      int_mv nearmv[2];
+
+#if CONFIG_EXT_INTER
+      if (mbmi_ext->ref_mv_count[rf_type] > 1) {
+        nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][1].this_mv;
+        nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][1].comp_mv;
+      } else {
+        nearmv[0] = frame_mv[NEARMV][refs[0]];
+        nearmv[1] = frame_mv[NEARMV][refs[1]];
+      }
+#else
+      int i;
+      int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2)
+                        ? VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2)
+                        : INT_MAX;
+
+      for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
+        nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][i + 1].this_mv;
+        nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][i + 1].comp_mv;
+
+        if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
+            nearmv[1].as_int == best_mbmode.mv[1].as_int) {
+          best_mbmode.mode = NEARMV;
+          best_mbmode.ref_mv_idx = i;
+        }
+      }
+#endif
+      if (mbmi_ext->ref_mv_count[rf_type] >= 1) {
+        nearestmv[0] = mbmi_ext->ref_mv_stack[rf_type][0].this_mv;
+        nearestmv[1] = mbmi_ext->ref_mv_stack[rf_type][0].comp_mv;
+      } else {
+        nearestmv[0] = frame_mv[NEARESTMV][refs[0]];
+        nearestmv[1] = frame_mv[NEARESTMV][refs[1]];
+      }
+
+      if (nearestmv[0].as_int == best_mbmode.mv[0].as_int &&
+          nearestmv[1].as_int == best_mbmode.mv[1].as_int)
+#if CONFIG_EXT_INTER
+        best_mbmode.mode = NEAREST_NEARESTMV;
+      else if (nearestmv[0].as_int == best_mbmode.mv[0].as_int &&
+               nearmv[1].as_int == best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAREST_NEARMV;
+      else if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
+               nearestmv[1].as_int == best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAR_NEARESTMV;
+      else if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
+               nearmv[1].as_int == best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAR_NEARMV;
+      else if (best_mbmode.mv[0].as_int == 0 && best_mbmode.mv[1].as_int == 0)
+        best_mbmode.mode = ZERO_ZEROMV;
+#else
+        best_mbmode.mode = NEARESTMV;
+      else if (best_mbmode.mv[0].as_int == 0 && best_mbmode.mv[1].as_int == 0)
+        best_mbmode.mode = ZEROMV;
+#endif  // CONFIG_EXT_INTER
+    }
+#else
+#if CONFIG_EXT_INTER
+    if (!comp_pred_mode) {
+#endif  // CONFIG_EXT_INTER
+      if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
+          ((comp_pred_mode &&
+            frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+           !comp_pred_mode))
+        best_mbmode.mode = NEARESTMV;
+      else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
+               ((comp_pred_mode &&
+                 frame_mv[NEARMV][refs[1]].as_int ==
+                     best_mbmode.mv[1].as_int) ||
+                !comp_pred_mode))
+        best_mbmode.mode = NEARMV;
+      else if (best_mbmode.mv[0].as_int == 0 &&
+               ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
+                !comp_pred_mode))
+        best_mbmode.mode = ZEROMV;
+#if CONFIG_EXT_INTER
+    } else {
+      const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
+                                           best_mbmode.ref_frame[1] };
+
+      if (frame_mv[NEAREST_NEARESTMV][refs[0]].as_int ==
+              best_mbmode.mv[0].as_int &&
+          frame_mv[NEAREST_NEARESTMV][refs[1]].as_int ==
+              best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAREST_NEARESTMV;
+      else if (frame_mv[NEAREST_NEARMV][refs[0]].as_int ==
+                   best_mbmode.mv[0].as_int &&
+               frame_mv[NEAREST_NEARMV][refs[1]].as_int ==
+                   best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAREST_NEARMV;
+      else if (frame_mv[NEAR_NEARESTMV][refs[0]].as_int ==
+                   best_mbmode.mv[0].as_int &&
+               frame_mv[NEAR_NEARESTMV][refs[1]].as_int ==
+                   best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAR_NEARESTMV;
+      else if (frame_mv[NEAR_NEARMV][refs[0]].as_int ==
+                   best_mbmode.mv[0].as_int &&
+               frame_mv[NEAR_NEARMV][refs[1]].as_int ==
+                   best_mbmode.mv[1].as_int)
+        best_mbmode.mode = NEAR_NEARMV;
+      else if (best_mbmode.mv[0].as_int == 0 && best_mbmode.mv[1].as_int == 0)
+        best_mbmode.mode = ZERO_ZEROMV;
+    }
+#endif  // CONFIG_EXT_INTER
+#endif
+  }
+
+#if CONFIG_REF_MV
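+  // When the MV-reference context flags that all candidate MVs are zero,
+  // normalize a zero-MV winner to ZEROMV.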
+  if (best_mbmode.ref_frame[0] > INTRA_FRAME && best_mbmode.mv[0].as_int == 0 &&
+#if CONFIG_EXT_INTER
+      (best_mbmode.ref_frame[1] <= INTRA_FRAME)
+#else
+      (best_mbmode.ref_frame[1] == NONE || best_mbmode.mv[1].as_int == 0)
+#endif  // CONFIG_EXT_INTER
+          ) {
+    int16_t mode_ctx = mbmi_ext->mode_context[best_mbmode.ref_frame[0]];
+#if !CONFIG_EXT_INTER
+    if (best_mbmode.ref_frame[1] > NONE)
+      mode_ctx &= (mbmi_ext->mode_context[best_mbmode.ref_frame[1]] | 0x00ff);
+#endif  // !CONFIG_EXT_INTER
+
+    if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) best_mbmode.mode = ZEROMV;
+  }
+#endif
+
+  if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
+    rd_cost->rate = INT_MAX;
+    rd_cost->rdcost = INT64_MAX;
+    return;
+  }
+
+  // If we used an estimate for the uv intra rd in the loop above...
+  if (sf->use_uv_intra_rd_estimate) {
+    // Do Intra UV best rd mode selection if best mode choice above was intra.
+    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
+      TX_SIZE uv_tx_size;
+      *mbmi = best_mbmode;
+      uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
+      rd_pick_intra_sbuv_mode(
+          cpi, x, &rate_uv_intra[uv_tx_size], &rate_uv_tokenonly[uv_tx_size],
+          &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
+          bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, uv_tx_size);
+    }
+  }
+
+#if CONFIG_DUAL_FILTER
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == best_mbmode.interp_filter[0]) ||
+         !is_inter_block(&best_mbmode));
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == best_mbmode.interp_filter[1]) ||
+         !is_inter_block(&best_mbmode));
+  if (best_mbmode.ref_frame[1] > INTRA_FRAME) {
+    assert((cm->interp_filter == SWITCHABLE) ||
+           (cm->interp_filter == best_mbmode.interp_filter[2]) ||
+           !is_inter_block(&best_mbmode));
+    assert((cm->interp_filter == SWITCHABLE) ||
+           (cm->interp_filter == best_mbmode.interp_filter[3]) ||
+           !is_inter_block(&best_mbmode));
+  }
+#else
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == best_mbmode.interp_filter) ||
+         !is_inter_block(&best_mbmode));
+#endif
+
+  if (!cpi->rc.is_src_frame_alt_ref)
+    vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                               sf->adaptive_rd_thresh, bsize, best_mode_index);
+
+  // macroblock modes
+  *mbmi = best_mbmode;
+  x->skip |= best_skip2;
+
+#if CONFIG_REF_MV
+  for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+    if (mbmi->mode != NEWMV)
+      mbmi->pred_mv[i].as_int = mbmi->mv[i].as_int;
+    else
+      mbmi->pred_mv[i].as_int = mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_int;
+  }
+#endif
+
+  for (i = 0; i < REFERENCE_MODES; ++i) {
+    if (best_pred_rd[i] == INT64_MAX)
+      best_pred_diff[i] = INT_MIN;
+    else
+      best_pred_diff[i] = best_rd - best_pred_rd[i];
+  }
+
+  x->skip |= best_mode_skippable;
+
+  assert(best_mode_index >= 0);
+
+  store_coding_context(x, ctx, best_mode_index, best_pred_diff,
+                       best_mode_skippable);
+
+  if (cm->allow_screen_content_tools && pmi->palette_size[1] > 0) {
+    restore_uv_color_map(cpi, x);
+  }
+}
+
+void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
+                                         MACROBLOCK *x, RD_COST *rd_cost,
+                                         BLOCK_SIZE bsize,
+                                         PICK_MODE_CONTEXT *ctx,
+                                         int64_t best_rd_so_far) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  unsigned char segment_id = mbmi->segment_id;
+  const int comp_pred = 0;
+  int i;
+  int64_t best_pred_diff[REFERENCE_MODES];
+  unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
+  unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
+  vpx_prob comp_mode_p;
+  INTERP_FILTER best_filter = SWITCHABLE;
+  int64_t this_rd = INT64_MAX;
+  int rate2 = 0;
+  const int64_t distortion2 = 0;
+
+  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
+                           &comp_mode_p);
+
+  for (i = 0; i < TOTAL_REFS_PER_FRAME; ++i) x->pred_sse[i] = INT_MAX;
+  for (i = LAST_FRAME; i < TOTAL_REFS_PER_FRAME; ++i)
+    x->pred_mv_sad[i] = INT_MAX;
+
+  rd_cost->rate = INT_MAX;
+
+  assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
+
+  mbmi->palette_mode_info.palette_size[0] = 0;
+  mbmi->palette_mode_info.palette_size[1] = 0;
+#if CONFIG_EXT_INTRA
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+  mbmi->mode = ZEROMV;
+  mbmi->motion_variation = SIMPLE_TRANSLATION;
+  mbmi->uv_mode = DC_PRED;
+  mbmi->ref_frame[0] = LAST_FRAME;
+  mbmi->ref_frame[1] = NONE;
+  mbmi->mv[0].as_int = 0;
+#if CONFIG_REF_MV
+  mbmi->ref_mv_idx = 0;
+  mbmi->pred_mv[0].as_int = 0;
+#endif
+  x->skip = 1;
+
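+  // The segment is skip-coded (no residual), so among switchable filters
+  // only the one that is cheapest to signal is needed.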
+  if (cm->interp_filter != BILINEAR) {
+    best_filter = EIGHTTAP_REGULAR;
+    if (cm->interp_filter == SWITCHABLE &&
+#if CONFIG_EXT_INTERP
+        vp10_is_interp_needed(xd) &&
+#endif  // CONFIG_EXT_INTERP
+        x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
+      int rs;
+      int best_rs = INT_MAX;
+      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
+#if CONFIG_DUAL_FILTER
+        int k;
+        for (k = 0; k < 4; ++k) mbmi->interp_filter[k] = i;
+#else
+        mbmi->interp_filter = i;
+#endif
+        rs = vp10_get_switchable_rate(cpi, xd);
+        if (rs < best_rs) {
+          best_rs = rs;
+#if CONFIG_DUAL_FILTER
+          best_filter = mbmi->interp_filter[0];
+#else
+          best_filter = mbmi->interp_filter;
+#endif
+        }
+      }
+    }
+  }
+  // Set the appropriate filter
+  if (cm->interp_filter == SWITCHABLE) {
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = best_filter;
+#else
+    mbmi->interp_filter = best_filter;
+#endif
+    rate2 += vp10_get_switchable_rate(cpi, xd);
+  } else {
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i) mbmi->interp_filter[i] = cm->interp_filter;
+#else
+    mbmi->interp_filter = cm->interp_filter;
+#endif
+  }
+
+  if (cm->reference_mode == REFERENCE_MODE_SELECT)
+    rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
+
+  // Estimate the reference frame signaling cost and add it
+  // to the rolling cost variable.
+  rate2 += ref_costs_single[LAST_FRAME];
+  this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+
+  rd_cost->rate = rate2;
+  rd_cost->dist = distortion2;
+  rd_cost->rdcost = this_rd;
+
+  if (this_rd >= best_rd_so_far) {
+    rd_cost->rate = INT_MAX;
+    rd_cost->rdcost = INT64_MAX;
+    return;
+  }
+
+#if CONFIG_DUAL_FILTER
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == mbmi->interp_filter[0]));
+#else
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == mbmi->interp_filter));
+#endif
+
+  vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                             cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
+
+  vp10_zero(best_pred_diff);
+
+  store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, 0);
+}
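+
+// A minimal sketch of the cost metric used throughout this file, assuming
+// the usual libvpx-style RDCOST definition (rate weighted by the Lagrange
+// multiplier in x->rdmult with 8 fractional bits, distortion scaled by
+// x->rddiv):
+//
+//   static INLINE int64_t rd_cost_model(int rdmult, int rddiv, int rate,
+//                                       int64_t dist) {
+//     return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
+//   }
+//
+// Lower is better: a candidate is kept only if its cost beats
+// best_rd_so_far, which is the early-out test used above.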
+
+void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
+                                    TileDataEnc *tile_data,
+                                    struct macroblock *x, int mi_row,
+                                    int mi_col, struct RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                                    int *returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                    BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                    int64_t best_rd_so_far) {
+  VP10_COMMON *const cm = &cpi->common;
+  RD_OPT *const rd_opt = &cpi->rd;
+  SPEED_FEATURES *const sf = &cpi->sf;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const struct segmentation *const seg = &cm->seg;
+  MV_REFERENCE_FRAME ref_frame, second_ref_frame;
+  unsigned char segment_id = mbmi->segment_id;
+  int comp_pred, i;
+  int_mv frame_mv[MB_MODE_COUNT][TOTAL_REFS_PER_FRAME];
+  struct buf_2d yv12_mb[TOTAL_REFS_PER_FRAME][MAX_MB_PLANE];
+  static const int flag_list[TOTAL_REFS_PER_FRAME] = {
+    0,
+    VPX_LAST_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_LAST2_FLAG,
+    VPX_LAST3_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_GOLD_FLAG,
+#if CONFIG_EXT_REFS
+    VPX_BWD_FLAG,
+#endif  // CONFIG_EXT_REFS
+    VPX_ALT_FLAG
+  };
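+  // flag_list maps an MV_REFERENCE_FRAME index to the VPX_*_FLAG bit kept in
+  // cpi->ref_frame_flags, so (cpi->ref_frame_flags & flag_list[ref_frame])
+  // below is non-zero only when that reference is available for this frame.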
+  int64_t best_rd = best_rd_so_far;
+  int64_t best_yrd = best_rd_so_far;  // FIXME(rbultje) more precise
+  int64_t best_pred_diff[REFERENCE_MODES];
+  int64_t best_pred_rd[REFERENCE_MODES];
+  MB_MODE_INFO best_mbmode;
+  int ref_index, best_ref_index = 0;
+  unsigned int ref_costs_single[TOTAL_REFS_PER_FRAME];
+  unsigned int ref_costs_comp[TOTAL_REFS_PER_FRAME];
+  vpx_prob comp_mode_p;
+#if CONFIG_DUAL_FILTER
+  INTERP_FILTER tmp_best_filter[4] = { 0 };
+#else
+  INTERP_FILTER tmp_best_filter = SWITCHABLE;
+#endif
+  int rate_uv_intra, rate_uv_tokenonly;
+  int64_t dist_uv;
+  int skip_uv;
+  PREDICTION_MODE mode_uv = DC_PRED;
+  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+#if CONFIG_EXT_INTER
+  int_mv seg_mvs[4][2][TOTAL_REFS_PER_FRAME];
+#else
+  int_mv seg_mvs[4][TOTAL_REFS_PER_FRAME];
+#endif  // CONFIG_EXT_INTER
+  b_mode_info best_bmodes[4];
+  int best_skip2 = 0;
+  int ref_frame_skip_mask[2] = { 0 };
+  int internal_active_edge =
+      vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+
+#if CONFIG_SUPERTX
+  best_rd_so_far = INT64_MAX;
+  best_rd = best_rd_so_far;
+  best_yrd = best_rd_so_far;
+#endif  // CONFIG_SUPERTX
+  vp10_zero(best_mbmode);
+
+#if CONFIG_EXT_INTRA
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
+  mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
+#endif  // CONFIG_EXT_INTRA
+  mbmi->motion_variation = SIMPLE_TRANSLATION;
+#if CONFIG_EXT_INTER
+  mbmi->use_wedge_interinter = 0;
+  mbmi->use_wedge_interintra = 0;
+#endif  // CONFIG_EXT_INTER
+
+  for (i = 0; i < 4; i++) {
+    int j;
+#if CONFIG_EXT_INTER
+    int k;
+
+    for (k = 0; k < 2; k++)
+      for (j = 0; j < TOTAL_REFS_PER_FRAME; j++)
+        seg_mvs[i][k][j].as_int = INVALID_MV;
+#else
+    for (j = 0; j < TOTAL_REFS_PER_FRAME; j++)
+      seg_mvs[i][j].as_int = INVALID_MV;
+#endif  // CONFIG_EXT_INTER
+  }
+
+  estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
+                           &comp_mode_p);
+
+  for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
+  rate_uv_intra = INT_MAX;
+
+  rd_cost->rate = INT_MAX;
+#if CONFIG_SUPERTX
+  *returnrate_nocoef = INT_MAX;
+#endif
+
+  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
+    x->mbmi_ext->mode_context[ref_frame] = 0;
+#if CONFIG_REF_MV && CONFIG_EXT_INTER
+    x->mbmi_ext->compound_mode_context[ref_frame] = 0;
+#endif  // CONFIG_REF_MV && CONFIG_EXT_INTER
+    if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+      setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
+                         frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
+    } else {
+      ref_frame_skip_mask[0] |= (1 << ref_frame);
+      ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+    }
+    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
+#if CONFIG_EXT_INTER
+    frame_mv[NEWFROMNEARMV][ref_frame].as_int = INVALID_MV;
+#endif  // CONFIG_EXT_INTER
+    frame_mv[ZEROMV][ref_frame].as_int = 0;
+  }
+
+  mbmi->palette_mode_info.palette_size[0] = 0;
+  mbmi->palette_mode_info.palette_size[1] = 0;
+
+  for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
+    int mode_excluded = 0;
+    int64_t this_rd = INT64_MAX;
+    int disable_skip = 0;
+    int compmode_cost = 0;
+    int rate2 = 0, rate_y = 0, rate_uv = 0;
+    int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
+    int skippable = 0;
+    int i;
+    int this_skip2 = 0;
+    int64_t total_sse = INT_MAX;
+    int early_term = 0;
+
+    ref_frame = vp10_ref_order[ref_index].ref_frame[0];
+    second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
+
+    // Look at the reference frame of the best mode so far and set the
+    // skip mask to look at a subset of the remaining modes.
+    if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
+      if (ref_index == 3) {
+        switch (best_mbmode.ref_frame[0]) {
+          case INTRA_FRAME: break;
+          case LAST_FRAME:
+            ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) |
+#if CONFIG_EXT_REFS
+                                      (1 << LAST2_FRAME) | (1 << LAST3_FRAME) |
+                                      (1 << BWDREF_FRAME) |
+#endif  // CONFIG_EXT_REFS
+                                      (1 << ALTREF_FRAME);
+            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+            break;
+#if CONFIG_EXT_REFS
+          case LAST2_FRAME:
+            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << LAST3_FRAME) |
+                                      (1 << GOLDEN_FRAME) |
+                                      (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME);
+            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+            break;
+          case LAST3_FRAME:
+            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << LAST2_FRAME) |
+                                      (1 << GOLDEN_FRAME) |
+                                      (1 << BWDREF_FRAME) | (1 << ALTREF_FRAME);
+            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+            break;
+#endif  // CONFIG_EXT_REFS
+          case GOLDEN_FRAME:
+            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) |
+#if CONFIG_EXT_REFS
+                                      (1 << LAST2_FRAME) | (1 << LAST3_FRAME) |
+                                      (1 << BWDREF_FRAME) |
+#endif  // CONFIG_EXT_REFS
+                                      (1 << ALTREF_FRAME);
+            ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
+            break;
+#if CONFIG_EXT_REFS
+          case BWDREF_FRAME:
+            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << LAST2_FRAME) |
+                                      (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) |
+                                      (1 << ALTREF_FRAME);
+            ref_frame_skip_mask[1] |= (1 << ALTREF_FRAME) | 0x01;
+            break;
+#endif  // CONFIG_EXT_REFS
+          case ALTREF_FRAME:
+            ref_frame_skip_mask[0] |= (1 << LAST_FRAME) |
+#if CONFIG_EXT_REFS
+                                      (1 << LAST2_FRAME) | (1 << LAST3_FRAME) |
+                                      (1 << BWDREF_FRAME) |
+#endif  // CONFIG_EXT_REFS
+                                      (1 << GOLDEN_FRAME);
+#if CONFIG_EXT_REFS
+            ref_frame_skip_mask[1] |= (1 << BWDREF_FRAME) | 0x01;
+#endif  // CONFIG_EXT_REFS
+            break;
+          case NONE:
+          case TOTAL_REFS_PER_FRAME:
+            assert(0 && "Invalid Reference frame");
+            break;
+        }
+      }
+    }
+
+    if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
+        (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
+      continue;
+
+    // Test best rd so far against threshold for trying this mode.
+    if (!internal_active_edge &&
+        rd_less_than_thresh(best_rd,
+                            rd_opt->threshes[segment_id][bsize][ref_index],
+                            tile_data->thresh_freq_fact[bsize][ref_index]))
+      continue;
+
+    comp_pred = second_ref_frame > INTRA_FRAME;
+    if (comp_pred) {
+      if (!cpi->allow_comp_inter_inter) continue;
+      if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
+      // Do not allow compound prediction if the segment level reference frame
+      // feature is in use, since in that case there can only be one reference.
+      if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
+
+      if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
+          best_mbmode.ref_frame[0] == INTRA_FRAME)
+        continue;
+    }
+
+    // TODO(jingning, jkoleszar): scaling reference frame not supported for
+    // sub8x8 blocks.
+    if (ref_frame > INTRA_FRAME &&
+        vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+      continue;
+
+    if (second_ref_frame > INTRA_FRAME &&
+        vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
+      continue;
+
+    if (comp_pred)
+      mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
+    else if (ref_frame != INTRA_FRAME)
+      mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
+
+    // If the segment level reference frame feature is enabled, then do
+    // nothing if the current ref frame is not the allowed one.
+    if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
+        get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
+      continue;
+    } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
+      // This drop out case is disabled if the ref frame segment level feature
+      // is enabled for this segment, to prevent the possibility that we end
+      // up unable to pick any mode.
+      // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, unless ARNR
+      // filtering is enabled, in which case we want an unfiltered
+      // alternative. We allow near/nearest as well because they may result
+      // in zero-zero MVs but be cheaper.
+      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
+        continue;
+    }
+
+    mbmi->tx_size = TX_4X4;
+    mbmi->uv_mode = DC_PRED;
+    mbmi->ref_frame[0] = ref_frame;
+    mbmi->ref_frame[1] = second_ref_frame;
+// Evaluate all sub-pel filters irrespective of whether we can use
+// them for this frame.
+#if CONFIG_DUAL_FILTER
+    for (i = 0; i < 4; ++i)
+      mbmi->interp_filter[i] = cm->interp_filter == SWITCHABLE
+                                   ? EIGHTTAP_REGULAR
+                                   : cm->interp_filter;
+#else
+    mbmi->interp_filter =
+        cm->interp_filter == SWITCHABLE ? EIGHTTAP_REGULAR : cm->interp_filter;
+#endif
+    x->skip = 0;
+    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
+
+    // Select prediction reference frames.
+    for (i = 0; i < MAX_MB_PLANE; i++) {
+      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
+      if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+    }
+
+#if CONFIG_VAR_TX
+    mbmi->inter_tx_size[0][0] = mbmi->tx_size;
+#endif
+
+    if (ref_frame == INTRA_FRAME) {
+      int rate;
+      if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
+                                       best_rd) >= best_rd)
+        continue;
+      rate2 += rate;
+      rate2 += intra_cost_penalty;
+      distortion2 += distortion_y;
+
+      if (rate_uv_intra == INT_MAX) {
+        choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
+                             &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
+      }
+      rate2 += rate_uv_intra;
+      rate_uv = rate_uv_tokenonly;
+      distortion2 += dist_uv;
+      distortion_uv = dist_uv;
+      mbmi->uv_mode = mode_uv;
+    } else {
+      int rate;
+      int64_t distortion;
+      int64_t this_rd_thresh;
+      int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
+      int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
+      int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
+      int tmp_best_skippable = 0;
+      int switchable_filter_index;
+      int_mv *second_ref =
+          comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
+      b_mode_info tmp_best_bmodes[16];  // Should this be 4?
+      MB_MODE_INFO tmp_best_mbmode;
+#if CONFIG_DUAL_FILTER
+#if CONFIG_EXT_INTERP
+      BEST_SEG_INFO bsi[25];
+#else
+      BEST_SEG_INFO bsi[9];
+#endif
+#else
+      BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
+#endif
+      int pred_exists = 0;
+      int uv_skippable;
+#if CONFIG_EXT_INTER
+      int_mv compound_seg_newmvs[4][2];
+      for (i = 0; i < 4; i++) {
+        compound_seg_newmvs[i][0].as_int = INVALID_MV;
+        compound_seg_newmvs[i][1].as_int = INVALID_MV;
+      }
+#endif  // CONFIG_EXT_INTER
+
+      this_rd_thresh = (ref_frame == LAST_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_LAST]
+                           : rd_opt->threshes[segment_id][bsize][THR_ALTR];
+#if CONFIG_EXT_REFS
+      this_rd_thresh = (ref_frame == LAST2_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_LAST2]
+                           : this_rd_thresh;
+      this_rd_thresh = (ref_frame == LAST3_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_LAST3]
+                           : this_rd_thresh;
+#endif  // CONFIG_EXT_REFS
+      this_rd_thresh = (ref_frame == GOLDEN_FRAME)
+                           ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
+                           : this_rd_thresh;
+#if CONFIG_EXT_REFS
+// TODO(zoeliu): To explore whether this_rd_thresh should consider
+//               BWDREF_FRAME and ALTREF_FRAME
+#endif  // CONFIG_EXT_REFS
+
+      // TODO(any): Add search of the tx_type to improve rd performance at the
+      // expense of speed.
+      mbmi->tx_type = DCT_DCT;
+
+      if (cm->interp_filter != BILINEAR) {
+#if CONFIG_DUAL_FILTER
+        tmp_best_filter[0] = EIGHTTAP_REGULAR;
+        tmp_best_filter[1] = EIGHTTAP_REGULAR;
+        tmp_best_filter[2] = EIGHTTAP_REGULAR;
+        tmp_best_filter[3] = EIGHTTAP_REGULAR;
+#else
+        tmp_best_filter = EIGHTTAP_REGULAR;
+#endif
+        if (x->source_variance < sf->disable_filter_search_var_thresh) {
+#if CONFIG_DUAL_FILTER
+          tmp_best_filter[0] = EIGHTTAP_REGULAR;
+#else
+          tmp_best_filter = EIGHTTAP_REGULAR;
+#endif
+        } else if (sf->adaptive_pred_interp_filter == 1 &&
+                   ctx->pred_interp_filter < SWITCHABLE) {
+#if CONFIG_DUAL_FILTER
+          tmp_best_filter[0] = ctx->pred_interp_filter;
+#else
+          tmp_best_filter = ctx->pred_interp_filter;
+#endif
+        } else if (sf->adaptive_pred_interp_filter == 2) {
+#if CONFIG_DUAL_FILTER
+          tmp_best_filter[0] = ctx->pred_interp_filter < SWITCHABLE
+                                   ? ctx->pred_interp_filter
+                                   : 0;
+#else
+          tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
+                                ? ctx->pred_interp_filter
+                                : 0;
+#endif
+        } else {
+#if CONFIG_DUAL_FILTER
+          for (switchable_filter_index = 0;
+#if CONFIG_EXT_INTERP
+               switchable_filter_index < 25;
+#else
+               switchable_filter_index < 9;
+#endif
+               ++switchable_filter_index) {
+#else
+          for (switchable_filter_index = 0;
+               switchable_filter_index < SWITCHABLE_FILTERS;
+               ++switchable_filter_index) {
+#endif
+            int newbest, rs;
+            int64_t rs_rd;
+            MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
+#if CONFIG_DUAL_FILTER
+            mbmi->interp_filter[0] = filter_sets[switchable_filter_index][0];
+            mbmi->interp_filter[1] = filter_sets[switchable_filter_index][1];
+            mbmi->interp_filter[2] = filter_sets[switchable_filter_index][0];
+            mbmi->interp_filter[3] = filter_sets[switchable_filter_index][1];
+#else
+            mbmi->interp_filter = switchable_filter_index;
+#endif
+            tmp_rd = rd_pick_best_sub8x8_mode(
+                cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+                &rate, &rate_y, &distortion, &skippable, &total_sse,
+                (int)this_rd_thresh, seg_mvs,
+#if CONFIG_EXT_INTER
+                compound_seg_newmvs,
+#endif  // CONFIG_EXT_INTER
+                bsi, switchable_filter_index, mi_row, mi_col);
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+            if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+                (mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
+                 mbmi->interp_filter[1] != EIGHTTAP_REGULAR))  // invalid config
+              continue;
+#else
+            if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+                mbmi->interp_filter != EIGHTTAP_REGULAR)  // invalid config
+              continue;
+#endif
+#endif  // CONFIG_EXT_INTERP
+            if (tmp_rd == INT64_MAX) continue;
+            rs = vp10_get_switchable_rate(cpi, xd);
+            rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
+            if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
+
+            newbest = (tmp_rd < tmp_best_rd);
+            if (newbest) {
+#if CONFIG_DUAL_FILTER
+              tmp_best_filter[0] = mbmi->interp_filter[0];
+              tmp_best_filter[1] = mbmi->interp_filter[1];
+              tmp_best_filter[2] = mbmi->interp_filter[2];
+              tmp_best_filter[3] = mbmi->interp_filter[3];
+#else
+              tmp_best_filter = mbmi->interp_filter;
+#endif
+              tmp_best_rd = tmp_rd;
+            }
+            if ((newbest && cm->interp_filter == SWITCHABLE) ||
+                (
+#if CONFIG_DUAL_FILTER
+                    mbmi->interp_filter[0] == cm->interp_filter
+#else
+                    mbmi->interp_filter == cm->interp_filter
+#endif
+                    && cm->interp_filter != SWITCHABLE)) {
+              tmp_best_rdu = tmp_rd;
+              tmp_best_rate = rate;
+              tmp_best_ratey = rate_y;
+              tmp_best_distortion = distortion;
+              tmp_best_sse = total_sse;
+              tmp_best_skippable = skippable;
+              tmp_best_mbmode = *mbmi;
+              for (i = 0; i < 4; i++) {
+                tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
+              }
+              pred_exists = 1;
+            }
+          }  // switchable_filter_index loop
+        }
+      }
+
+      if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
+
+#if CONFIG_DUAL_FILTER
+      mbmi->interp_filter[0] =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter[0]
+                                           : cm->interp_filter);
+      mbmi->interp_filter[1] =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter[1]
+                                           : cm->interp_filter);
+      mbmi->interp_filter[2] =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter[2]
+                                           : cm->interp_filter);
+      mbmi->interp_filter[3] =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter[3]
+                                           : cm->interp_filter);
+#else
+      mbmi->interp_filter =
+          (cm->interp_filter == SWITCHABLE ? tmp_best_filter
+                                           : cm->interp_filter);
+#endif
+
+      if (!pred_exists) {
+        // Handle the special case where a filter that is not in the
+        // switchable list (bilinear) is indicated at the frame level.
+        tmp_rd = rd_pick_best_sub8x8_mode(
+            cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+            &rate, &rate_y, &distortion, &skippable, &total_sse,
+            (int)this_rd_thresh, seg_mvs,
+#if CONFIG_EXT_INTER
+            compound_seg_newmvs,
+#endif  // CONFIG_EXT_INTER
+            bsi, 0, mi_row, mi_col);
+#if CONFIG_EXT_INTERP
+#if CONFIG_DUAL_FILTER
+        if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+            (mbmi->interp_filter[0] != EIGHTTAP_REGULAR ||
+             mbmi->interp_filter[1] != EIGHTTAP_REGULAR)) {
+          mbmi->interp_filter[0] = EIGHTTAP_REGULAR;
+          mbmi->interp_filter[1] = EIGHTTAP_REGULAR;
+        }
+#else
+        if (!vp10_is_interp_needed(xd) && cm->interp_filter == SWITCHABLE &&
+            mbmi->interp_filter != EIGHTTAP_REGULAR)
+          mbmi->interp_filter = EIGHTTAP_REGULAR;
+#endif  // CONFIG_DUAL_FILTER
+#endif  // CONFIG_EXT_INTERP
+        if (tmp_rd == INT64_MAX) continue;
+      } else {
+        total_sse = tmp_best_sse;
+        rate = tmp_best_rate;
+        rate_y = tmp_best_ratey;
+        distortion = tmp_best_distortion;
+        skippable = tmp_best_skippable;
+        *mbmi = tmp_best_mbmode;
+        for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
+      }
+      // Add in the cost of the transform type
+      if (!xd->lossless[mbmi->segment_id]) {
+        int rate_tx_type = 0;
+#if CONFIG_EXT_TX
+        if (get_ext_tx_types(mbmi->tx_size, bsize, 1) > 1) {
+          const int eset = get_ext_tx_set(mbmi->tx_size, bsize, 1);
+          rate_tx_type =
+              cpi->inter_tx_type_costs[eset][mbmi->tx_size][mbmi->tx_type];
+        }
+#else
+        if (mbmi->tx_size < TX_32X32) {
+          rate_tx_type = cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
+        }
+#endif
+        rate += rate_tx_type;
+        rate_y += rate_tx_type;
+      }
+
+      rate2 += rate;
+      distortion2 += distortion;
+
+      if (cm->interp_filter == SWITCHABLE)
+        rate2 += vp10_get_switchable_rate(cpi, xd);
+
+      if (!mode_excluded)
+        mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
+                                  : cm->reference_mode == COMPOUND_REFERENCE;
+
+      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+
+      tmp_best_rdu =
+          best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+                           RDCOST(x->rdmult, x->rddiv, 0, total_sse));
+
+      if (tmp_best_rdu > 0) {
+        // If even the 'Y' rd value of the split is higher than the best so
+        // far, then don't bother looking at UV.
+        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
+#if CONFIG_VAR_TX
+        if (!inter_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
+                              &uv_sse, BLOCK_8X8, tmp_best_rdu))
+          continue;
+#else
+        if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
+                              &uv_sse, BLOCK_8X8, tmp_best_rdu))
+          continue;
+#endif
+        rate2 += rate_uv;
+        distortion2 += distortion_uv;
+        skippable = skippable && uv_skippable;
+        total_sse += uv_sse;
+      } else {
+        continue;
+      }
+    }
+
+    if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
+
+    // Estimate the reference frame signaling cost and add it
+    // to the rolling cost variable.
+    if (second_ref_frame > INTRA_FRAME) {
+      rate2 += ref_costs_comp[ref_frame];
+#if CONFIG_EXT_REFS
+      rate2 += ref_costs_comp[second_ref_frame];
+#endif  // CONFIG_EXT_REFS
+    } else {
+      rate2 += ref_costs_single[ref_frame];
+    }
+
+    if (!disable_skip) {
+      // Skip is never coded at the segment level for sub8x8 blocks; it is
+      // instead always coded in the bitstream at the mode info level.
+
+      if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
+        if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
+            RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
+          // Add in the cost of the no skip flag.
+          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        } else {
+          // FIXME(rbultje) make this work for splitmv also
+          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          distortion2 = total_sse;
+          assert(total_sse >= 0);
+          rate2 -= (rate_y + rate_uv);
+          rate_y = 0;
+          rate_uv = 0;
+          this_skip2 = 1;
+        }
+      } else {
+        // Add in the cost of the no skip flag.
+        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+      }
+
+      // Calculate the final RD estimate for this mode.
+      this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+    }
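+
+    // Equivalently, in RDCOST units the residual is coded only when
+    //   RDCOST(rate_y + rate_uv, distortion2) < RDCOST(0, total_sse),
+    // i.e. when spending the residual's rate buys back more distortion than
+    // marking the block skipped and taking the full source SSE.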
+
+    if (!disable_skip && ref_frame == INTRA_FRAME) {
+      for (i = 0; i < REFERENCE_MODES; ++i)
+        best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
+    }
+
+    // Did this mode help, i.e. is it the new best mode so far?
+    if (this_rd < best_rd || x->skip) {
+      if (!mode_excluded) {
+        // Note index of best mode so far
+        best_ref_index = ref_index;
+
+        if (ref_frame == INTRA_FRAME) {
+          /* required for left and above block mv */
+          mbmi->mv[0].as_int = 0;
+        }
+
+        rd_cost->rate = rate2;
+#if CONFIG_SUPERTX
+        *returnrate_nocoef = rate2 - rate_y - rate_uv;
+        if (!disable_skip)
+          *returnrate_nocoef -=
+              vp10_cost_bit(vp10_get_skip_prob(cm, xd), this_skip2);
+        *returnrate_nocoef -= vp10_cost_bit(vp10_get_intra_inter_prob(cm, xd),
+                                            mbmi->ref_frame[0] != INTRA_FRAME);
+        assert(*returnrate_nocoef > 0);
+#endif  // CONFIG_SUPERTX
+        rd_cost->dist = distortion2;
+        rd_cost->rdcost = this_rd;
+        best_rd = this_rd;
+        best_yrd =
+            best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
+        best_mbmode = *mbmi;
+        best_skip2 = this_skip2;
+
+#if CONFIG_VAR_TX
+        for (i = 0; i < MAX_MB_PLANE; ++i)
+          memset(ctx->blk_skip[i], 0, sizeof(uint8_t) * ctx->num_4x4_blk);
+#endif
+
+        for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
+
+        // TODO(debargha): enhance this test with a better distortion prediction
+        // based on qp, activity mask and history
+        if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
+            (ref_index > MIN_EARLY_TERM_INDEX)) {
+          int qstep = xd->plane[0].dequant[1];
+          // TODO(debargha): Enhance this by specializing for each mode_index
+          int scale = 4;
+#if CONFIG_VP9_HIGHBITDEPTH
+          if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+            qstep >>= (xd->bd - 8);
+          }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+          if (x->source_variance < UINT_MAX) {
+            const int var_adjust = (x->source_variance < 16);
+            scale -= var_adjust;
+          }
+          if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
+            early_term = 1;
+          }
+        }
+      }
+    }
+
+    /* keep record of best compound/single-only prediction */
+    if (!disable_skip && ref_frame != INTRA_FRAME) {
+      int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
+
+      if (cm->reference_mode == REFERENCE_MODE_SELECT) {
+        single_rate = rate2 - compmode_cost;
+        hybrid_rate = rate2;
+      } else {
+        single_rate = rate2;
+        hybrid_rate = rate2 + compmode_cost;
+      }
+
+      single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
+      hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
+
+      if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
+        best_pred_rd[SINGLE_REFERENCE] = single_rd;
+      else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
+        best_pred_rd[COMPOUND_REFERENCE] = single_rd;
+
+      if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
+        best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
+    }
+
+    if (early_term) break;
+
+    if (x->skip && !comp_pred) break;
+  }
+
+  if (best_rd >= best_rd_so_far) {
+    rd_cost->rate = INT_MAX;
+    rd_cost->rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+    *returnrate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+    return;
+  }
+
+  // If we used an estimate for the uv intra rd in the loop above...
+  if (sf->use_uv_intra_rd_estimate) {
+    // Do Intra UV best rd mode selection if best mode choice above was intra.
+    if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
+      *mbmi = best_mbmode;
+      rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra, &rate_uv_tokenonly,
+                              &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
+    }
+  }
+
+  if (best_rd == INT64_MAX) {
+    rd_cost->rate = INT_MAX;
+    rd_cost->dist = INT64_MAX;
+    rd_cost->rdcost = INT64_MAX;
+#if CONFIG_SUPERTX
+    *returnrate_nocoef = INT_MAX;
+#endif  // CONFIG_SUPERTX
+    return;
+  }
+
+#if CONFIG_DUAL_FILTER
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == best_mbmode.interp_filter[0]) ||
+         !is_inter_block(&best_mbmode));
+#else
+  assert((cm->interp_filter == SWITCHABLE) ||
+         (cm->interp_filter == best_mbmode.interp_filter) ||
+         !is_inter_block(&best_mbmode));
+#endif
+
+  vp10_update_rd_thresh_fact(cm, tile_data->thresh_freq_fact,
+                             sf->adaptive_rd_thresh, bsize, best_ref_index);
+
+  // macroblock modes
+  *mbmi = best_mbmode;
+#if CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
+  mbmi->inter_tx_size[0][0] = mbmi->tx_size;
+#endif  // CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
+
+  x->skip |= best_skip2;
+  if (!is_inter_block(&best_mbmode)) {
+    for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
+  } else {
+    for (i = 0; i < 4; ++i)
+      memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
+
+    mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
+    mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
+#if CONFIG_REF_MV
+    mbmi->pred_mv[0].as_int = xd->mi[0]->bmi[3].pred_mv_s8[0].as_int;
+    mbmi->pred_mv[1].as_int = xd->mi[0]->bmi[3].pred_mv_s8[1].as_int;
+#endif
+  }
+
+  for (i = 0; i < REFERENCE_MODES; ++i) {
+    if (best_pred_rd[i] == INT64_MAX)
+      best_pred_diff[i] = INT_MIN;
+    else
+      best_pred_diff[i] = best_rd - best_pred_rd[i];
+  }
+
+  store_coding_context(x, ctx, best_ref_index, best_pred_diff, 0);
+}
+
+#if CONFIG_OBMC
+// This function has a structure similar to vp10_build_obmc_inter_prediction
+//
+// The OBMC predictor is computed as:
+//
+//  PObmc(x,y) =
+//    VPX_BLEND_A64(Mh(x),
+//                  VPX_BLEND_A64(Mv(y), P(x,y), PAbove(x,y)),
+//                  PLeft(x, y))
+//
+// Scaling up by VPX_BLEND_A64_MAX_ALPHA ** 2 and omitting the intermediate
+// rounding, this can be written as:
+//
+//  VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * Pobmc(x,y) =
+//    Mh(x) * Mv(y) * P(x,y) +
+//      Mh(x) * Cv(y) * Pabove(x,y) +
+//      VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+//
+// Where:
+//
+//  Cv(y) = VPX_BLEND_A64_MAX_ALPHA - Mv(y)
+//  Ch(x) = VPX_BLEND_A64_MAX_ALPHA - Mh(x)
+//
+// This function computes 'wsrc' and 'mask' as:
+//
+//  wsrc(x, y) =
+//    VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA * src(x, y) -
+//      Mh(x) * Cv(y) * Pabove(x,y) -
+//      VPX_BLEND_A64_MAX_ALPHA * Ch(x) * PLeft(x, y)
+//
+//  mask(x, y) = Mh(x) * Mv(y)
+//
+// These can then be used to efficiently approximate the error for any
+// predictor P in the context of the provided neighbouring predictors by
+// computing:
+//
+//  error(x, y) =
+//    wsrc(x, y) - mask(x, y) * P(x, y) / (VPX_BLEND_A64_MAX_ALPHA ** 2)
+//
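+// Worked example (a sketch assuming VPX_BLEND_A64_MAX_ALPHA == 64, rounding
+// omitted): with Mh(x) = 48, Mv(y) = 56, src = 90, Pabove = 80 and
+// PLeft = 60, a candidate predictor P = 100 gives
+//
+//   mask = 48 * 56                                    = 2688
+//   wsrc = 64*64*90 - 48*(64-56)*80 - 64*(64-48)*60   = 276480
+//   wsrc - mask * P                                   = 7680
+//
+// and 7680 / (64 * 64) = 1.875 = src - Pobmc for the same inputs, so SSE
+// computed on (wsrc, mask) reproduces the OBMC prediction error up to the
+// fixed-point scale factor.
+//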
+static void calc_target_weighted_pred(const VP10_COMMON *cm,
+                                      const MACROBLOCK *x,
+                                      const MACROBLOCKD *xd, int mi_row,
+                                      int mi_col, const uint8_t *above,
+                                      int above_stride, const uint8_t *left,
+                                      int left_stride, int32_t *mask_buf,
+                                      int32_t *wsrc_buf) {
+  const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
+  int row, col, i;
+  const int bw = 8 * xd->n8_w;
+  const int bh = 8 * xd->n8_h;
+  const int wsrc_stride = bw;
+  const int mask_stride = bw;
+  const int src_scale = VPX_BLEND_A64_MAX_ALPHA * VPX_BLEND_A64_MAX_ALPHA;
+#if CONFIG_VP9_HIGHBITDEPTH
+  const int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
+#else
+  const int is_hbd = 0;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+  // plane 0 should not be subsampled
+  assert(xd->plane[0].subsampling_x == 0);
+  assert(xd->plane[0].subsampling_y == 0);
+
+  vp10_zero_array(wsrc_buf, bw * bh);
+  for (i = 0; i < bw * bh; ++i) mask_buf[i] = VPX_BLEND_A64_MAX_ALPHA;
+
+  // handle above row
+  if (xd->up_available) {
+    const int overlap = num_4x4_blocks_high_lookup[bsize] * 2;
+    const int miw = VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
+    const int mi_row_offset = -1;
+    const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+
+    assert(miw > 0);
+
+    i = 0;
+    do {  // for each mi in the above row
+      const int mi_col_offset = i;
+      const MB_MODE_INFO *const above_mbmi =
+          &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
+      const int mi_step =
+          VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
+      const int neighbor_bw = mi_step * MI_SIZE;
+
+      if (is_neighbor_overlappable(above_mbmi)) {
+        const int tmp_stride = above_stride;
+        int32_t *wsrc = wsrc_buf + (i * MI_SIZE);
+        int32_t *mask = mask_buf + (i * MI_SIZE);
+
+        if (!is_hbd) {
+          const uint8_t *tmp = above;
+
+          for (row = 0; row < overlap; ++row) {
+            const uint8_t m0 = mask1d[row];
+            const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+            for (col = 0; col < neighbor_bw; ++col) {
+              wsrc[col] = m1 * tmp[col];
+              mask[col] = m0;
+            }
+            wsrc += wsrc_stride;
+            mask += mask_stride;
+            tmp += tmp_stride;
+          }
+#if CONFIG_VP9_HIGHBITDEPTH
+        } else {
+          const uint16_t *tmp = CONVERT_TO_SHORTPTR(above);
+
+          for (row = 0; row < overlap; ++row) {
+            const uint8_t m0 = mask1d[row];
+            const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+            for (col = 0; col < neighbor_bw; ++col) {
+              wsrc[col] = m1 * tmp[col];
+              mask[col] = m0;
+            }
+            wsrc += wsrc_stride;
+            mask += mask_stride;
+            tmp += tmp_stride;
+          }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        }
+      }
+
+      above += neighbor_bw;
+      i += mi_step;
+    } while (i < miw);
+  }
+
+  for (i = 0; i < bw * bh; ++i) {
+    wsrc_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
+    mask_buf[i] *= VPX_BLEND_A64_MAX_ALPHA;
+  }
+
+  // handle left column
+  if (xd->left_available) {
+    const int overlap = num_4x4_blocks_wide_lookup[bsize] * 2;
+    const int mih = VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
+    const int mi_col_offset = -1;
+    const uint8_t *const mask1d = vp10_get_obmc_mask(overlap);
+
+    assert(mih > 0);
+
+    i = 0;
+    do {  // for each mi in the left column
+      const int mi_row_offset = i;
+      const MB_MODE_INFO *const left_mbmi =
+          &xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride]->mbmi;
+      const int mi_step =
+          VPXMIN(xd->n8_h, num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
+      const int neighbor_bh = mi_step * MI_SIZE;
+
+      if (is_neighbor_overlappable(left_mbmi)) {
+        const int tmp_stride = left_stride;
+        int32_t *wsrc = wsrc_buf + (i * MI_SIZE * wsrc_stride);
+        int32_t *mask = mask_buf + (i * MI_SIZE * mask_stride);
+
+        if (!is_hbd) {
+          const uint8_t *tmp = left;
+
+          for (row = 0; row < neighbor_bh; ++row) {
+            for (col = 0; col < overlap; ++col) {
+              const uint8_t m0 = mask1d[col];
+              const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+              wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
+                          (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
+              mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+            }
+            wsrc += wsrc_stride;
+            mask += mask_stride;
+            tmp += tmp_stride;
+          }
+#if CONFIG_VP9_HIGHBITDEPTH
+        } else {
+          const uint16_t *tmp = CONVERT_TO_SHORTPTR(left);
+
+          for (row = 0; row < neighbor_bh; ++row) {
+            for (col = 0; col < overlap; ++col) {
+              const uint8_t m0 = mask1d[col];
+              const uint8_t m1 = VPX_BLEND_A64_MAX_ALPHA - m0;
+              wsrc[col] = (wsrc[col] >> VPX_BLEND_A64_ROUND_BITS) * m0 +
+                          (tmp[col] << VPX_BLEND_A64_ROUND_BITS) * m1;
+              mask[col] = (mask[col] >> VPX_BLEND_A64_ROUND_BITS) * m0;
+            }
+            wsrc += wsrc_stride;
+            mask += mask_stride;
+            tmp += tmp_stride;
+          }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        }
+      }
+
+      left += neighbor_bh * left_stride;
+      i += mi_step;
+    } while (i < mih);
+  }
+
+  if (!is_hbd) {
+    const uint8_t *src = x->plane[0].src.buf;
+
+    for (row = 0; row < bh; ++row) {
+      for (col = 0; col < bw; ++col) {
+        wsrc_buf[col] = src[col] * src_scale - wsrc_buf[col];
+      }
+      wsrc_buf += wsrc_stride;
+      src += x->plane[0].src.stride;
+    }
+#if CONFIG_VP9_HIGHBITDEPTH
+  } else {
+    const uint16_t *src = CONVERT_TO_SHORTPTR(x->plane[0].src.buf);
+
+    for (row = 0; row < bh; ++row) {
+      for (col = 0; col < bw; ++col) {
+        wsrc_buf[col] = src[col] * src_scale - wsrc_buf[col];
+      }
+      wsrc_buf += wsrc_stride;
+      src += x->plane[0].src.stride;
+    }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  }
+}
+#endif  // CONFIG_OBMC
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
new file mode 100644
index 0000000..4ce2879
--- /dev/null
+++ b/av1/encoder/rdopt.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_RDOPT_H_
+#define VP10_ENCODER_RDOPT_H_
+
+#include "av1/common/blockd.h"
+
+#include "av1/encoder/block.h"
+#include "av1/encoder/context_tree.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct TileInfo;
+struct VP10_COMP;
+struct macroblock;
+struct RD_COST;
+
+void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
+                                struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+                                PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+
+unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+                                            const struct buf_2d *ref,
+                                            BLOCK_SIZE bs);
+#if CONFIG_VP9_HIGHBITDEPTH
+unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+                                                 const struct buf_2d *ref,
+                                                 BLOCK_SIZE bs, int bd);
+#endif
+
+void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
+                                struct TileDataEnc *tile_data,
+                                struct macroblock *x, int mi_row, int mi_col,
+                                struct RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                                int *returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                int64_t best_rd_so_far);
+
+void vp10_rd_pick_inter_mode_sb_seg_skip(
+    struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+    struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+    int64_t best_rd_so_far);
+
+int vp10_internal_image_edge(struct VP10_COMP *cpi);
+int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
+int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
+int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
+
+void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
+                                    struct TileDataEnc *tile_data,
+                                    struct macroblock *x, int mi_row,
+                                    int mi_col, struct RD_COST *rd_cost,
+#if CONFIG_SUPERTX
+                                    int *returnrate_nocoef,
+#endif  // CONFIG_SUPERTX
+                                    BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+                                    int64_t best_rd_so_far);
+
+#if CONFIG_SUPERTX
+#if CONFIG_VAR_TX
+void vp10_tx_block_rd_b(const VP10_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size,
+                        int blk_row, int blk_col, int plane, int block,
+                        int plane_bsize, int coeff_ctx, int *rate,
+                        int64_t *dist, int64_t *bsse, int *skip);
+#endif
+
+void vp10_txfm_rd_in_plane_supertx(MACROBLOCK *x, const VP10_COMP *cpi,
+                                   int *rate, int64_t *distortion,
+                                   int *skippable, int64_t *sse,
+                                   int64_t ref_best_rd, int plane,
+                                   BLOCK_SIZE bsize, TX_SIZE tx_size,
+                                   int use_fast_coef_casting);
+#endif  // CONFIG_SUPERTX
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_RDOPT_H_
diff --git a/av1/encoder/resize.c b/av1/encoder/resize.c
new file mode 100644
index 0000000..e209b21
--- /dev/null
+++ b/av1/encoder/resize.c
@@ -0,0 +1,819 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if CONFIG_VP9_HIGHBITDEPTH
+#include "aom_dsp/vpx_dsp_common.h"
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#include "aom_ports/mem.h"
+#include "av1/common/common.h"
+#include "av1/encoder/resize.h"
+
+#define FILTER_BITS 7
+
+#define INTERP_TAPS 8
+#define SUBPEL_BITS 5
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define INTERP_PRECISION_BITS 32
+
+typedef int16_t interp_kernel[INTERP_TAPS];
+
+// Filters for interpolation (0.5-band) - note this also filters integer pels.
+static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = {
+  { -3, 0, 35, 64, 35, 0, -3, 0 },    { -3, -1, 34, 64, 36, 1, -3, 0 },
+  { -3, -1, 32, 64, 38, 1, -3, 0 },   { -2, -2, 31, 63, 39, 2, -3, 0 },
+  { -2, -2, 29, 63, 41, 2, -3, 0 },   { -2, -2, 28, 63, 42, 3, -4, 0 },
+  { -2, -3, 27, 63, 43, 4, -4, 0 },   { -2, -3, 25, 62, 45, 5, -4, 0 },
+  { -2, -3, 24, 62, 46, 5, -4, 0 },   { -2, -3, 23, 61, 47, 6, -4, 0 },
+  { -2, -3, 21, 60, 49, 7, -4, 0 },   { -1, -4, 20, 60, 50, 8, -4, -1 },
+  { -1, -4, 19, 59, 51, 9, -4, -1 },  { -1, -4, 17, 58, 52, 10, -4, 0 },
+  { -1, -4, 16, 57, 53, 12, -4, -1 }, { -1, -4, 15, 56, 54, 13, -4, -1 },
+  { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 13, 54, 56, 15, -4, -1 },
+  { -1, -4, 12, 53, 57, 16, -4, -1 }, { 0, -4, 10, 52, 58, 17, -4, -1 },
+  { -1, -4, 9, 51, 59, 19, -4, -1 },  { -1, -4, 8, 50, 60, 20, -4, -1 },
+  { 0, -4, 7, 49, 60, 21, -3, -2 },   { 0, -4, 6, 47, 61, 23, -3, -2 },
+  { 0, -4, 5, 46, 62, 24, -3, -2 },   { 0, -4, 5, 45, 62, 25, -3, -2 },
+  { 0, -4, 4, 43, 63, 27, -3, -2 },   { 0, -4, 3, 42, 63, 28, -2, -2 },
+  { 0, -3, 2, 41, 63, 29, -2, -2 },   { 0, -3, 2, 39, 63, 31, -2, -2 },
+  { 0, -3, 1, 38, 64, 32, -1, -3 },   { 0, -3, 1, 36, 64, 34, -1, -3 }
+};
+
+// Filters for interpolation (0.625-band) - note this also filters integer pels.
+static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = {
+  { -1, -8, 33, 80, 33, -8, -1, 0 }, { -1, -8, 30, 80, 35, -8, -1, 1 },
+  { -1, -8, 28, 80, 37, -7, -2, 1 }, { 0, -8, 26, 79, 39, -7, -2, 1 },
+  { 0, -8, 24, 79, 41, -7, -2, 1 },  { 0, -8, 22, 78, 43, -6, -2, 1 },
+  { 0, -8, 20, 78, 45, -5, -3, 1 },  { 0, -8, 18, 77, 48, -5, -3, 1 },
+  { 0, -8, 16, 76, 50, -4, -3, 1 },  { 0, -8, 15, 75, 52, -3, -4, 1 },
+  { 0, -7, 13, 74, 54, -3, -4, 1 },  { 0, -7, 11, 73, 56, -2, -4, 1 },
+  { 0, -7, 10, 71, 58, -1, -4, 1 },  { 1, -7, 8, 70, 60, 0, -5, 1 },
+  { 1, -6, 6, 68, 62, 1, -5, 1 },    { 1, -6, 5, 67, 63, 2, -5, 1 },
+  { 1, -6, 4, 65, 65, 4, -6, 1 },    { 1, -5, 2, 63, 67, 5, -6, 1 },
+  { 1, -5, 1, 62, 68, 6, -6, 1 },    { 1, -5, 0, 60, 70, 8, -7, 1 },
+  { 1, -4, -1, 58, 71, 10, -7, 0 },  { 1, -4, -2, 56, 73, 11, -7, 0 },
+  { 1, -4, -3, 54, 74, 13, -7, 0 },  { 1, -4, -3, 52, 75, 15, -8, 0 },
+  { 1, -3, -4, 50, 76, 16, -8, 0 },  { 1, -3, -5, 48, 77, 18, -8, 0 },
+  { 1, -3, -5, 45, 78, 20, -8, 0 },  { 1, -2, -6, 43, 78, 22, -8, 0 },
+  { 1, -2, -7, 41, 79, 24, -8, 0 },  { 1, -2, -7, 39, 79, 26, -8, 0 },
+  { 1, -2, -7, 37, 80, 28, -8, -1 }, { 1, -1, -8, 35, 80, 30, -8, -1 },
+};
+
+// Filters for interpolation (0.75-band) - note this also filters integer pels.
+static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = {
+  { 2, -11, 25, 96, 25, -11, 2, 0 }, { 2, -11, 22, 96, 28, -11, 2, 0 },
+  { 2, -10, 19, 95, 31, -11, 2, 0 }, { 2, -10, 17, 95, 34, -12, 2, 0 },
+  { 2, -9, 14, 94, 37, -12, 2, 0 },  { 2, -8, 12, 93, 40, -12, 1, 0 },
+  { 2, -8, 9, 92, 43, -12, 1, 1 },   { 2, -7, 7, 91, 46, -12, 1, 0 },
+  { 2, -7, 5, 90, 49, -12, 1, 0 },   { 2, -6, 3, 88, 52, -12, 0, 1 },
+  { 2, -5, 1, 86, 55, -12, 0, 1 },   { 2, -5, -1, 84, 58, -11, 0, 1 },
+  { 2, -4, -2, 82, 61, -11, -1, 1 }, { 2, -4, -4, 80, 64, -10, -1, 1 },
+  { 1, -3, -5, 77, 67, -9, -1, 1 },  { 1, -3, -6, 75, 70, -8, -2, 1 },
+  { 1, -2, -7, 72, 72, -7, -2, 1 },  { 1, -2, -8, 70, 75, -6, -3, 1 },
+  { 1, -1, -9, 67, 77, -5, -3, 1 },  { 1, -1, -10, 64, 80, -4, -4, 2 },
+  { 1, -1, -11, 61, 82, -2, -4, 2 }, { 1, 0, -11, 58, 84, -1, -5, 2 },
+  { 1, 0, -12, 55, 86, 1, -5, 2 },   { 1, 0, -12, 52, 88, 3, -6, 2 },
+  { 0, 1, -12, 49, 90, 5, -7, 2 },   { 0, 1, -12, 46, 91, 7, -7, 2 },
+  { 1, 1, -12, 43, 92, 9, -8, 2 },   { 0, 1, -12, 40, 93, 12, -8, 2 },
+  { 0, 2, -12, 37, 94, 14, -9, 2 },  { 0, 2, -12, 34, 95, 17, -10, 2 },
+  { 0, 2, -11, 31, 95, 19, -10, 2 }, { 0, 2, -11, 28, 96, 22, -11, 2 }
+};
+
+// Filters for interpolation (0.875-band) - note this also filters integer pels.
+static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = {
+  { 3, -8, 13, 112, 13, -8, 3, 0 },   { 3, -7, 10, 112, 17, -9, 3, -1 },
+  { 2, -6, 7, 111, 21, -9, 3, -1 },   { 2, -5, 4, 111, 24, -10, 3, -1 },
+  { 2, -4, 1, 110, 28, -11, 3, -1 },  { 1, -3, -1, 108, 32, -12, 4, -1 },
+  { 1, -2, -3, 106, 36, -13, 4, -1 }, { 1, -1, -6, 105, 40, -14, 4, -1 },
+  { 1, -1, -7, 102, 44, -14, 4, -1 }, { 1, 0, -9, 100, 48, -15, 4, -1 },
+  { 1, 1, -11, 97, 53, -16, 4, -1 },  { 0, 1, -12, 95, 57, -16, 4, -1 },
+  { 0, 2, -13, 91, 61, -16, 4, -1 },  { 0, 2, -14, 88, 65, -16, 4, -1 },
+  { 0, 3, -15, 84, 69, -17, 4, 0 },   { 0, 3, -16, 81, 73, -16, 3, 0 },
+  { 0, 3, -16, 77, 77, -16, 3, 0 },   { 0, 3, -16, 73, 81, -16, 3, 0 },
+  { 0, 4, -17, 69, 84, -15, 3, 0 },   { -1, 4, -16, 65, 88, -14, 2, 0 },
+  { -1, 4, -16, 61, 91, -13, 2, 0 },  { -1, 4, -16, 57, 95, -12, 1, 0 },
+  { -1, 4, -16, 53, 97, -11, 1, 1 },  { -1, 4, -15, 48, 100, -9, 0, 1 },
+  { -1, 4, -14, 44, 102, -7, -1, 1 }, { -1, 4, -14, 40, 105, -6, -1, 1 },
+  { -1, 4, -13, 36, 106, -3, -2, 1 }, { -1, 4, -12, 32, 108, -1, -3, 1 },
+  { -1, 3, -11, 28, 110, 1, -4, 2 },  { -1, 3, -10, 24, 111, 4, -5, 2 },
+  { -1, 3, -9, 21, 111, 7, -6, 2 },   { -1, 3, -9, 17, 112, 10, -7, 3 }
+};
+
+// Filters for interpolation (full-band) - no filtering for integer pels.
+static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = {
+  { 0, 0, 0, 128, 0, 0, 0, 0 },        { 0, 1, -3, 128, 3, -1, 0, 0 },
+  { -1, 2, -6, 127, 7, -2, 1, 0 },     { -1, 3, -9, 126, 12, -4, 1, 0 },
+  { -1, 4, -12, 125, 16, -5, 1, 0 },   { -1, 4, -14, 123, 20, -6, 2, 0 },
+  { -1, 5, -15, 120, 25, -8, 2, 0 },   { -1, 5, -17, 118, 30, -9, 3, -1 },
+  { -1, 6, -18, 114, 35, -10, 3, -1 }, { -1, 6, -19, 111, 41, -12, 3, -1 },
+  { -1, 6, -20, 107, 46, -13, 4, -1 }, { -1, 6, -21, 103, 52, -14, 4, -1 },
+  { -1, 6, -21, 99, 57, -16, 5, -1 },  { -1, 6, -21, 94, 63, -17, 5, -1 },
+  { -1, 6, -20, 89, 68, -18, 5, -1 },  { -1, 6, -20, 84, 73, -19, 6, -1 },
+  { -1, 6, -20, 79, 79, -20, 6, -1 },  { -1, 6, -19, 73, 84, -20, 6, -1 },
+  { -1, 5, -18, 68, 89, -20, 6, -1 },  { -1, 5, -17, 63, 94, -21, 6, -1 },
+  { -1, 5, -16, 57, 99, -21, 6, -1 },  { -1, 4, -14, 52, 103, -21, 6, -1 },
+  { -1, 4, -13, 46, 107, -20, 6, -1 }, { -1, 3, -12, 41, 111, -19, 6, -1 },
+  { -1, 3, -10, 35, 114, -18, 6, -1 }, { -1, 3, -9, 30, 118, -17, 5, -1 },
+  { 0, 2, -8, 25, 120, -15, 5, -1 },   { 0, 2, -6, 20, 123, -14, 4, -1 },
+  { 0, 1, -5, 16, 125, -12, 4, -1 },   { 0, 1, -4, 12, 126, -9, 3, -1 },
+  { 0, 1, -2, 7, 127, -6, 2, -1 },     { 0, 0, -1, 3, 128, -3, 1, 0 }
+};
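+
+// Every 8-tap kernel in the tables above sums to 128 (1 << FILTER_BITS), so
+// the interpolators preserve flat (DC) signals. A quick self-check sketch:
+//
+//   int sub, k;
+//   for (sub = 0; sub < (1 << SUBPEL_BITS); ++sub) {
+//     int sum = 0;
+//     for (k = 0; k < INTERP_TAPS; ++k)
+//       sum += filteredinterp_filters1000[sub][k];
+//     assert(sum == (1 << FILTER_BITS));
+//   }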
+
+// Filters for factor of 2 downsampling.
+static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
+
+static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
+  int outlength16 = outlength * 16;
+  if (outlength16 >= inlength * 16)
+    return filteredinterp_filters1000;
+  else if (outlength16 >= inlength * 13)
+    return filteredinterp_filters875;
+  else if (outlength16 >= inlength * 11)
+    return filteredinterp_filters750;
+  else if (outlength16 >= inlength * 9)
+    return filteredinterp_filters625;
+  else
+    return filteredinterp_filters500;
+}
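+
+// Band selection sketch: with inlength = 64, the thresholds above become
+//   outlength >= 64 -> full band   (16/16)
+//   outlength >= 52 -> 0.875-band  (13/16)
+//   outlength >= 44 -> 0.75-band   (11/16)
+//   outlength >= 36 -> 0.625-band  ( 9/16)
+//   otherwise       -> 0.5-band
+// i.e. the stronger the downscale, the narrower the low-pass band.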
+
+static void interpolate(const uint8_t *const input, int inlength,
+                        uint8_t *output, int outlength) {
+  const int64_t delta =
+      (((uint64_t)inlength << 32) + outlength / 2) / outlength;
+  const int64_t offset =
+      inlength > outlength
+          ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+                outlength
+          : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+                outlength;
+  uint8_t *optr = output;
+  int x, x1, x2, sum, k, int_pel, sub_pel;
+  int64_t y;
+
+  const interp_kernel *interp_filters =
+      choose_interp_filter(inlength, outlength);
+
+  x = 0;
+  y = offset;
+  while ((y >> INTERP_PRECISION_BITS) < (INTERP_TAPS / 2 - 1)) {
+    x++;
+    y += delta;
+  }
+  x1 = x;
+  x = outlength - 1;
+  y = delta * x + offset;
+  while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+         inlength) {
+    x--;
+    y -= delta;
+  }
+  x2 = x;
+  if (x1 > x2) {
+    for (x = 0, y = offset; x < outlength; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k) {
+        const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
+        sum += filter[k] *
+               input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
+      }
+      *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+  } else {
+    // Initial part.
+    for (x = 0, y = offset; x < x1; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+                                      ? 0
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
+      *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+    // Middle part.
+    for (; x <= x2; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[int_pel - INTERP_TAPS / 2 + 1 + k];
+      *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+    // End part.
+    for (; x < outlength; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+                                      ? inlength - 1
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
+      *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+  }
+}
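+
+// Fixed-point stepping sketch (rounding terms ignored): source positions are
+// tracked in Q32 (INTERP_PRECISION_BITS). For inlength = 4, outlength = 8:
+//   delta  = (4 << 32) / 8           (0.5 in Q32)
+//   offset = -((8 - 4) << 31) / 8    (-0.25 in Q32)
+// so output sample x reads the input at 0.5 * x - 0.25; int_pel = y >> 32
+// picks the centre tap and sub_pel (the top SUBPEL_BITS of the fraction)
+// picks one of the 32 kernel rows.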
+
+static void down2_symeven(const uint8_t *const input, int length,
+                          uint8_t *output) {
+  // Actual filter len = 2 * filter_len_half.
+  const int16_t *filter = vp10_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  int i, j;
+  uint8_t *optr = output;
+  int l1 = filter_len_half;
+  int l2 = (length - filter_len_half);
+  l1 += (l1 & 1);
+  l2 += (l2 & 1);
+  if (l1 > l2) {
+    // Short input length.
+    for (i = 0; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] +
+                input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+  } else {
+    // Initial part.
+    for (i = 0; i < l1; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + 1 + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+    // Middle part.
+    for (; i < l2; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[i + 1 + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+    // End part.
+    for (; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[i - j] +
+                input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+  }
+}
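+
+// Applied symmetrically, the even-phase half filter expands to the 8-tap
+// kernel { -1, -3, 12, 56, 56, 12, -3, -1 }, which sums to 128
+// (1 << FILTER_BITS); the (1 << (FILTER_BITS - 1)) term added to each sum
+// is the rounding offset for the final shift.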
+
+static void down2_symodd(const uint8_t *const input, int length,
+                         uint8_t *output) {
+  // Actual filter len = 2 * filter_len_half - 1.
+  const int16_t *filter = vp10_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  int i, j;
+  uint8_t *optr = output;
+  int l1 = filter_len_half - 1;
+  int l2 = (length - filter_len_half + 1);
+  l1 += (l1 & 1);
+  l2 += (l2 & 1);
+  if (l1 > l2) {
+    // Short input length.
+    for (i = 0; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] +
+                input[(i + j >= length ? length - 1 : i + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+  } else {
+    // Initial part.
+    for (i = 0; i < l1; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+    // Middle part.
+    for (; i < l2; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[i + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+    // End part.
+    for (; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel(sum);
+    }
+  }
+}
+
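+// Length remaining after `steps` rounds of 2:1 decimation, rounding up at
+// each step.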
+static int get_down2_length(int length, int steps) {
+  int s;
+  for (s = 0; s < steps; ++s) length = (length + 1) >> 1;
+  return length;
+}
+
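+// Number of 2:1 decimation steps that keep the projected length at or above
+// out_length; any remaining ratio is left to the interpolation stage.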
+static int get_down2_steps(int in_length, int out_length) {
+  int steps = 0;
+  int proj_in_length;
+  while ((proj_in_length = get_down2_length(in_length, 1)) >= out_length) {
+    ++steps;
+    in_length = proj_in_length;
+  }
+  return steps;
+}
+
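+// Resize a single line. The length is first reduced by repeated 2:1
+// decimation while that still leaves at least olength samples, then any
+// remaining (less than 2:1) ratio is covered by a final interpolate() pass.
+// otmp must provide scratch space for the intermediate decimation results.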
+static void resize_multistep(const uint8_t *const input, int length,
+                             uint8_t *output, int olength, uint8_t *otmp) {
+  int steps;
+  if (length == olength) {
+    memcpy(output, input, sizeof(output[0]) * length);
+    return;
+  }
+  steps = get_down2_steps(length, olength);
+
+  if (steps > 0) {
+    int s;
+    uint8_t *out = NULL;
+    uint8_t *otmp2;
+    int filteredlength = length;
+
+    assert(otmp != NULL);
+    otmp2 = otmp + get_down2_length(length, 1);
+    for (s = 0; s < steps; ++s) {
+      const int proj_filteredlength = get_down2_length(filteredlength, 1);
+      const uint8_t *const in = (s == 0 ? input : out);
+      if (s == steps - 1 && proj_filteredlength == olength)
+        out = output;
+      else
+        out = (s & 1 ? otmp2 : otmp);
+      if (filteredlength & 1)
+        down2_symodd(in, filteredlength, out);
+      else
+        down2_symeven(in, filteredlength, out);
+      filteredlength = proj_filteredlength;
+    }
+    if (filteredlength != olength) {
+      interpolate(out, filteredlength, output, olength);
+    }
+  } else {
+    interpolate(input, length, output, olength);
+  }
+}
+
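+// Gather/scatter helpers that copy a strided image column to and from a
+// contiguous array so the 1-D resize routines can be reused vertically.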
+static void fill_col_to_arr(uint8_t *img, int stride, int len, uint8_t *arr) {
+  int i;
+  uint8_t *iptr = img;
+  uint8_t *aptr = arr;
+  for (i = 0; i < len; ++i, iptr += stride) {
+    *aptr++ = *iptr;
+  }
+}
+
+static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) {
+  int i;
+  uint8_t *iptr = img;
+  uint8_t *aptr = arr;
+  for (i = 0; i < len; ++i, iptr += stride) {
+    *iptr = *aptr++;
+  }
+}
+
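+// Separable plane resize: every row is resized to width2 into intbuf, then
+// every column of intbuf is resized to height2 into the output. If any work
+// buffer fails to allocate, the output is left unmodified.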
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+                       int in_stride, uint8_t *output, int height2, int width2,
+                       int out_stride) {
+  int i;
+  uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
+  uint8_t *tmpbuf =
+      (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width));
+  uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * height);
+  uint8_t *arrbuf2 = (uint8_t *)malloc(sizeof(uint8_t) * height2);
+  if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL)
+    goto Error;
+  assert(width > 0);
+  assert(height > 0);
+  assert(width2 > 0);
+  assert(height2 > 0);
+  for (i = 0; i < height; ++i)
+    resize_multistep(input + in_stride * i, width, intbuf + width2 * i, width2,
+                     tmpbuf);
+  for (i = 0; i < width2; ++i) {
+    fill_col_to_arr(intbuf + i, width2, height, arrbuf);
+    resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf);
+    fill_arr_to_col(output + i, out_stride, height2, arrbuf2);
+  }
+
+Error:
+  free(intbuf);
+  free(tmpbuf);
+  free(arrbuf);
+  free(arrbuf2);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
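+// High bitdepth variants of the functions above, operating on uint16_t
+// samples and clipping against the active bit depth bd.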
+static void highbd_interpolate(const uint16_t *const input, int inlength,
+                               uint16_t *output, int outlength, int bd) {
+  const int64_t delta =
+      (((uint64_t)inlength << 32) + outlength / 2) / outlength;
+  const int64_t offset =
+      inlength > outlength
+          ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+                outlength
+          : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+                outlength;
+  uint16_t *optr = output;
+  int x, x1, x2, sum, k, int_pel, sub_pel;
+  int64_t y;
+
+  const interp_kernel *interp_filters =
+      choose_interp_filter(inlength, outlength);
+
+  x = 0;
+  y = offset;
+  while ((y >> INTERP_PRECISION_BITS) < (INTERP_TAPS / 2 - 1)) {
+    x++;
+    y += delta;
+  }
+  x1 = x;
+  x = outlength - 1;
+  y = delta * x + offset;
+  while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+         inlength) {
+    x--;
+    y -= delta;
+  }
+  x2 = x;
+  if (x1 > x2) {
+    for (x = 0, y = offset; x < outlength; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k) {
+        const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
+        sum += filter[k] *
+               input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
+      }
+      *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
+    }
+  } else {
+    // Initial part.
+    for (x = 0, y = offset; x < x1; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+                                      ? 0
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
+      *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
+    }
+    // Middle part.
+    for (; x <= x2; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[int_pel - INTERP_TAPS / 2 + 1 + k];
+      *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
+    }
+    // End part.
+    for (; x < outlength; ++x, y += delta) {
+      const int16_t *filter;
+      int_pel = y >> INTERP_PRECISION_BITS;
+      sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
+      filter = interp_filters[sub_pel];
+      sum = 0;
+      for (k = 0; k < INTERP_TAPS; ++k)
+        sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+                                      ? inlength - 1
+                                      : int_pel - INTERP_TAPS / 2 + 1 + k)];
+      *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
+    }
+  }
+}
+
+static void highbd_down2_symeven(const uint16_t *const input, int length,
+                                 uint16_t *output, int bd) {
+  // Actual filter len = 2 * filter_len_half.
+  const int16_t *filter = vp10_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  int i, j;
+  uint16_t *optr = output;
+  int l1 = filter_len_half;
+  int l2 = (length - filter_len_half);
+  l1 += (l1 & 1);
+  l2 += (l2 & 1);
+  if (l1 > l2) {
+    // Short input length.
+    for (i = 0; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] +
+                input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+  } else {
+    // Initial part.
+    for (i = 0; i < l1; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + 1 + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+    // Middle part.
+    for (; i < l2; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[i + 1 + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+    // End part.
+    for (; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1));
+      for (j = 0; j < filter_len_half; ++j) {
+        sum += (input[i - j] +
+                input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+  }
+}
+
+static void highbd_down2_symodd(const uint16_t *const input, int length,
+                                uint16_t *output, int bd) {
+  // Actual filter len = 2 * filter_len_half - 1.
+  const int16_t *filter = vp10_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  int i, j;
+  uint16_t *optr = output;
+  int l1 = filter_len_half - 1;
+  int l2 = (length - filter_len_half + 1);
+  l1 += (l1 & 1);
+  l2 += (l2 & 1);
+  if (l1 > l2) {
+    // Short input length.
+    for (i = 0; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] +
+                input[(i + j >= length ? length - 1 : i + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+  } else {
+    // Initial part.
+    for (i = 0; i < l1; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+    // Middle part.
+    for (; i < l2; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[i + j]) * filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+    // End part.
+    for (; i < length; i += 2) {
+      int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
+      for (j = 1; j < filter_len_half; ++j) {
+        sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
+               filter[j];
+      }
+      sum >>= FILTER_BITS;
+      *optr++ = clip_pixel_highbd(sum, bd);
+    }
+  }
+}
+
+static void highbd_resize_multistep(const uint16_t *const input, int length,
+                                    uint16_t *output, int olength,
+                                    uint16_t *otmp, int bd) {
+  int steps;
+  if (length == olength) {
+    memcpy(output, input, sizeof(output[0]) * length);
+    return;
+  }
+  steps = get_down2_steps(length, olength);
+
+  if (steps > 0) {
+    int s;
+    uint16_t *out = NULL;
+    uint16_t *otmp2;
+    int filteredlength = length;
+
+    assert(otmp != NULL);
+    otmp2 = otmp + get_down2_length(length, 1);
+    for (s = 0; s < steps; ++s) {
+      const int proj_filteredlength = get_down2_length(filteredlength, 1);
+      const uint16_t *const in = (s == 0 ? input : out);
+      if (s == steps - 1 && proj_filteredlength == olength)
+        out = output;
+      else
+        out = (s & 1 ? otmp2 : otmp);
+      if (filteredlength & 1)
+        highbd_down2_symodd(in, filteredlength, out, bd);
+      else
+        highbd_down2_symeven(in, filteredlength, out, bd);
+      filteredlength = proj_filteredlength;
+    }
+    if (filteredlength != olength) {
+      highbd_interpolate(out, filteredlength, output, olength, bd);
+    }
+  } else {
+    highbd_interpolate(input, length, output, olength, bd);
+  }
+}
+
+static void highbd_fill_col_to_arr(uint16_t *img, int stride, int len,
+                                   uint16_t *arr) {
+  int i;
+  uint16_t *iptr = img;
+  uint16_t *aptr = arr;
+  for (i = 0; i < len; ++i, iptr += stride) {
+    *aptr++ = *iptr;
+  }
+}
+
+static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len,
+                                   uint16_t *arr) {
+  int i;
+  uint16_t *iptr = img;
+  uint16_t *aptr = arr;
+  for (i = 0; i < len; ++i, iptr += stride) {
+    *iptr = *aptr++;
+  }
+}
+
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                              int in_stride, uint8_t *output, int height2,
+                              int width2, int out_stride, int bd) {
+  int i;
+  uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
+  uint16_t *tmpbuf =
+      (uint16_t *)malloc(sizeof(uint16_t) * (width < height ? height : width));
+  uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * height);
+  uint16_t *arrbuf2 = (uint16_t *)malloc(sizeof(uint16_t) * height2);
+  if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL)
+    goto Error;
+  for (i = 0; i < height; ++i) {
+    highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width,
+                            intbuf + width2 * i, width2, tmpbuf, bd);
+  }
+  for (i = 0; i < width2; ++i) {
+    highbd_fill_col_to_arr(intbuf + i, width2, height, arrbuf);
+    highbd_resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf, bd);
+    highbd_fill_arr_to_col(CONVERT_TO_SHORTPTR(output + i), out_stride, height2,
+                           arrbuf2);
+  }
+
+Error:
+  free(intbuf);
+  free(tmpbuf);
+  free(arrbuf);
+  free(arrbuf2);
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                    owidth / 2, ouv_stride);
+  vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                    owidth / 2, ouv_stride);
+}
+
+void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+                    ouv_stride);
+  vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+                    ouv_stride);
+}
+
+void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth) {
+  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                    ouv_stride);
+  vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                    ouv_stride);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+                           owidth / 2, ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+                           owidth / 2, ouv_stride, bd);
+}
+
+void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+                           owidth / 2, ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+                           owidth / 2, ouv_stride, bd);
+}
+
+void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd) {
+  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+                           oy_stride, bd);
+  vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+                           ouv_stride, bd);
+  vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+                           ouv_stride, bd);
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
diff --git a/av1/encoder/resize.h b/av1/encoder/resize.h
new file mode 100644
index 0000000..8fe1d1b
--- /dev/null
+++ b/av1/encoder/resize.h
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_RESIZE_H_
+#define VP10_ENCODER_RESIZE_H_
+
+#include <stdio.h>
+#include "aom/vpx_integer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
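+// Resize an 8-bit plane of size width x height (row stride in_stride) to
+// width2 x height2 (row stride out_stride). For example (illustrative only),
+// halving a plane:
+//   vp10_resize_plane(src, h, w, src_stride, dst, h / 2, w / 2, dst_stride);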
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+                       int in_stride, uint8_t *output, int height2, int width2,
+                       int out_stride);
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+                          const uint8_t *const u, const uint8_t *const v,
+                          int uv_stride, int height, int width, uint8_t *oy,
+                          int oy_stride, uint8_t *ou, uint8_t *ov,
+                          int ouv_stride, int oheight, int owidth);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+                              int in_stride, uint8_t *output, int height2,
+                              int width2, int out_stride, int bd);
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+                                 const uint8_t *const u, const uint8_t *const v,
+                                 int uv_stride, int height, int width,
+                                 uint8_t *oy, int oy_stride, uint8_t *ou,
+                                 uint8_t *ov, int ouv_stride, int oheight,
+                                 int owidth, int bd);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_RESIZE_H_
diff --git a/av1/encoder/segmentation.c b/av1/encoder/segmentation.c
new file mode 100644
index 0000000..5ac1283
--- /dev/null
+++ b/av1/encoder/segmentation.c
@@ -0,0 +1,378 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "aom_mem/vpx_mem.h"
+
+#include "av1/common/pred_common.h"
+#include "av1/common/tile_common.h"
+
+#include "av1/encoder/cost.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/subexp.h"
+
+void vp10_enable_segmentation(struct segmentation *seg) {
+  seg->enabled = 1;
+  seg->update_map = 1;
+  seg->update_data = 1;
+}
+
+void vp10_disable_segmentation(struct segmentation *seg) {
+  seg->enabled = 0;
+  seg->update_map = 0;
+  seg->update_data = 0;
+}
+
+void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+                           unsigned char abs_delta) {
+  seg->abs_delta = abs_delta;
+
+  memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
+}
+
+void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+                             SEG_LVL_FEATURES feature_id) {
+  seg->feature_mask[segment_id] &= ~(1 << feature_id);
+}
+
+void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+                        SEG_LVL_FEATURES feature_id) {
+  seg->feature_data[segment_id][feature_id] = 0;
+}
+
+// Based on a set of segment counts, calculate a probability tree.
+static void calc_segtree_probs(unsigned *segcounts,
+                               vpx_prob *segment_tree_probs,
+                               const vpx_prob *cur_tree_probs) {
+  // Work out probabilities of each segment
+  const unsigned cc[4] = { segcounts[0] + segcounts[1],
+                           segcounts[2] + segcounts[3],
+                           segcounts[4] + segcounts[5],
+                           segcounts[6] + segcounts[7] };
+  const unsigned ccc[2] = { cc[0] + cc[1], cc[2] + cc[3] };
+  int i;
+
+  segment_tree_probs[0] = get_binary_prob(ccc[0], ccc[1]);
+  segment_tree_probs[1] = get_binary_prob(cc[0], cc[1]);
+  segment_tree_probs[2] = get_binary_prob(cc[2], cc[3]);
+  segment_tree_probs[3] = get_binary_prob(segcounts[0], segcounts[1]);
+  segment_tree_probs[4] = get_binary_prob(segcounts[2], segcounts[3]);
+  segment_tree_probs[5] = get_binary_prob(segcounts[4], segcounts[5]);
+  segment_tree_probs[6] = get_binary_prob(segcounts[6], segcounts[7]);
+
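+  // For each tree node, let the update-savings search decide whether the
+  // freshly measured probability is worth signaling relative to the
+  // currently coded probability.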
+  for (i = 0; i < 7; i++) {
+    const unsigned *ct =
+        i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
+    vp10_prob_diff_update_savings_search(
+        ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
+  }
+}
+
+// Based on a set of segment counts and probabilities, calculate a cost
+// estimate.
+static int cost_segmap(unsigned *segcounts, vpx_prob *probs) {
+  const int c01 = segcounts[0] + segcounts[1];
+  const int c23 = segcounts[2] + segcounts[3];
+  const int c45 = segcounts[4] + segcounts[5];
+  const int c67 = segcounts[6] + segcounts[7];
+  const int c0123 = c01 + c23;
+  const int c4567 = c45 + c67;
+
+  // Cost the top node of the tree
+  int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
+
+  // Cost subsequent levels
+  if (c0123 > 0) {
+    cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
+
+    if (c01 > 0)
+      cost += segcounts[0] * vp10_cost_zero(probs[3]) +
+              segcounts[1] * vp10_cost_one(probs[3]);
+    if (c23 > 0)
+      cost += segcounts[2] * vp10_cost_zero(probs[4]) +
+              segcounts[3] * vp10_cost_one(probs[4]);
+  }
+
+  if (c4567 > 0) {
+    cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
+
+    if (c45 > 0)
+      cost += segcounts[4] * vp10_cost_zero(probs[5]) +
+              segcounts[5] * vp10_cost_one(probs[5]);
+    if (c67 > 0)
+      cost += segcounts[6] * vp10_cost_zero(probs[6]) +
+              segcounts[7] * vp10_cost_one(probs[6]);
+  }
+
+  return cost;
+}
+
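+// Accumulate segment map statistics for one block: a plain hit count per
+// segment and, on inter frames, how often the segment id predicted from the
+// previous frame's map matches the actual one.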
+static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
+                       const TileInfo *tile, MODE_INFO **mi,
+                       unsigned *no_pred_segcounts,
+                       unsigned (*temporal_predictor_count)[2],
+                       unsigned *t_unpred_seg_counts, int bw, int bh,
+                       int mi_row, int mi_col) {
+  int segment_id;
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  xd->mi = mi;
+  segment_id = xd->mi[0]->mbmi.segment_id;
+
+  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
+
+  // Count the number of hits on each segment with no prediction
+  no_pred_segcounts[segment_id]++;
+
+  // Temporal prediction not allowed on key frames
+  if (cm->frame_type != KEY_FRAME) {
+    const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
+    // Test to see if the segment id matches the predicted value.
+    const int pred_segment_id =
+        get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
+    const int pred_flag = pred_segment_id == segment_id;
+    const int pred_context = vp10_get_pred_context_seg_id(xd);
+
+    // Store the prediction status for this mb and update counts
+    // as appropriate
+    xd->mi[0]->mbmi.seg_id_predicted = pred_flag;
+    temporal_predictor_count[pred_context][pred_flag]++;
+
+    // Update the "unpredicted" segment count
+    if (!pred_flag) t_unpred_seg_counts[segment_id]++;
+  }
+}
+
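+// Walk the partition tree below one superblock and call count_segs() for
+// every leaf block.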
+static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
+                          const TileInfo *tile, MODE_INFO **mi,
+                          unsigned *no_pred_segcounts,
+                          unsigned (*temporal_predictor_count)[2],
+                          unsigned *t_unpred_seg_counts, int mi_row, int mi_col,
+                          BLOCK_SIZE bsize) {
+  const int mis = cm->mi_stride;
+  const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
+#if CONFIG_EXT_PARTITION_TYPES
+  PARTITION_TYPE partition;
+#else
+  int bw, bh;
+#endif  // CONFIG_EXT_PARTITION_TYPES
+
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+#if CONFIG_EXT_PARTITION_TYPES
+  if (bsize == BLOCK_8X8)
+    partition = PARTITION_NONE;
+  else
+    partition = get_partition(cm, mi_row, mi_col, bsize);
+  switch (partition) {
+    case PARTITION_NONE:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, bs, bs, mi_row, mi_col);
+      break;
+    case PARTITION_HORZ:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
+                 mi_row + hbs, mi_col);
+      break;
+    case PARTITION_VERT:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+                 mi_col + hbs);
+      break;
+    case PARTITION_HORZ_A:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, hbs, hbs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row, mi_col + hbs);
+      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
+                 mi_row + hbs, mi_col);
+      break;
+    case PARTITION_HORZ_B:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row + hbs, mi_col);
+      count_segs(cm, xd, tile, mi + hbs + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row + hbs, mi_col + hbs);
+      break;
+    case PARTITION_VERT_A:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, hbs, hbs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row + hbs, mi_col);
+      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+                 mi_col + hbs);
+      break;
+    case PARTITION_VERT_B:
+      count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+                 t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
+      count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row, mi_col + hbs);
+      count_segs(cm, xd, tile, mi + hbs + hbs * mis, no_pred_segcounts,
+                 temporal_predictor_count, t_unpred_seg_counts, hbs, hbs,
+                 mi_row + hbs, mi_col + hbs);
+      break;
+    case PARTITION_SPLIT: {
+      const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
+      int n;
+
+      assert(num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type] < bs &&
+             num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type] < bs);
+
+      for (n = 0; n < 4; n++) {
+        const int mi_dc = hbs * (n & 1);
+        const int mi_dr = hbs * (n >> 1);
+
+        count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
+                      temporal_predictor_count, t_unpred_seg_counts,
+                      mi_row + mi_dr, mi_col + mi_dc, subsize);
+      }
+    } break;
+    default: assert(0);
+  }
+#else
+  bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
+  bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];
+
+  if (bw == bs && bh == bs) {
+    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+               t_unpred_seg_counts, bs, bs, mi_row, mi_col);
+  } else if (bw == bs && bh < bs) {
+    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+               t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
+    count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts,
+               temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
+               mi_row + hbs, mi_col);
+  } else if (bw < bs && bh == bs) {
+    count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
+               t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
+    count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+               temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+               mi_col + hbs);
+  } else {
+    const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
+    int n;
+
+    assert(bw < bs && bh < bs);
+
+    for (n = 0; n < 4; n++) {
+      const int mi_dc = hbs * (n & 1);
+      const int mi_dr = hbs * (n >> 1);
+
+      count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
+                    temporal_predictor_count, t_unpred_seg_counts,
+                    mi_row + mi_dr, mi_col + mi_dc, subsize);
+    }
+  }
+#endif  // CONFIG_EXT_PARTITION_TYPES
+}
+
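+// Choose between coding the segment map directly and coding it predictively
+// from the previous frame's map, by estimating and comparing the cost of
+// both methods.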
+void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
+  struct segmentation *seg = &cm->seg;
+  struct segmentation_probs *segp = &cm->fc->seg;
+
+  int no_pred_cost;
+  int t_pred_cost = INT_MAX;
+
+  int i, tile_col, tile_row, mi_row, mi_col;
+
+  unsigned(*temporal_predictor_count)[2] = cm->counts.seg.pred;
+  unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
+  unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
+
+  vpx_prob no_pred_tree[SEG_TREE_PROBS];
+  vpx_prob t_pred_tree[SEG_TREE_PROBS];
+  vpx_prob t_nopred_prob[PREDICTION_PROBS];
+
+  (void)xd;
+
+  // We are about to recompute all the segment counts, so zero the accumulators.
+  vp10_zero(cm->counts.seg);
+
+  // First of all generate stats regarding how well the last segment map
+  // predicts this one
+  for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
+    TileInfo tile_info;
+    vp10_tile_set_row(&tile_info, cm, tile_row);
+    for (tile_col = 0; tile_col < cm->tile_cols; tile_col++) {
+      MODE_INFO **mi_ptr;
+      vp10_tile_set_col(&tile_info, cm, tile_col);
+      mi_ptr = cm->mi_grid_visible + tile_info.mi_row_start * cm->mi_stride +
+               tile_info.mi_col_start;
+      for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
+           mi_row += cm->mib_size, mi_ptr += cm->mib_size * cm->mi_stride) {
+        MODE_INFO **mi = mi_ptr;
+        for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
+             mi_col += cm->mib_size, mi += cm->mib_size) {
+          count_segs_sb(cm, xd, &tile_info, mi, no_pred_segcounts,
+                        temporal_predictor_count, t_unpred_seg_counts, mi_row,
+                        mi_col, cm->sb_size);
+        }
+      }
+    }
+  }
+
+  // Work out probability tree for coding segments without prediction
+  // and the cost.
+  calc_segtree_probs(no_pred_segcounts, no_pred_tree, segp->tree_probs);
+  no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);
+
+  // Intra-only and error-resilient frames cannot use temporal prediction.
+  if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
+    // Work out probability tree for coding those segments not
+    // predicted using the temporal method and the cost.
+    calc_segtree_probs(t_unpred_seg_counts, t_pred_tree, segp->tree_probs);
+    t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);
+
+    // Add in the cost of the signaling for each prediction context.
+    for (i = 0; i < PREDICTION_PROBS; i++) {
+      const int count0 = temporal_predictor_count[i][0];
+      const int count1 = temporal_predictor_count[i][1];
+
+      t_nopred_prob[i] = get_binary_prob(count0, count1);
+      vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
+                                           segp->pred_probs[i],
+                                           &t_nopred_prob[i], DIFF_UPDATE_PROB);
+
+      // Add in the predictor signaling cost
+      t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
+                     count1 * vp10_cost_one(t_nopred_prob[i]);
+    }
+  }
+
+  // Now choose which coding method to use.
+  if (t_pred_cost < no_pred_cost) {
+    assert(!cm->error_resilient_mode);
+    seg->temporal_update = 1;
+  } else {
+    seg->temporal_update = 0;
+  }
+}
+
+void vp10_reset_segment_features(VP10_COMMON *cm) {
+  struct segmentation *seg = &cm->seg;
+
+  // Set up default state for MB feature flags
+  seg->enabled = 0;
+  seg->update_map = 0;
+  seg->update_data = 0;
+  vp10_clearall_segfeatures(seg);
+}
diff --git a/av1/encoder/segmentation.h b/av1/encoder/segmentation.h
new file mode 100644
index 0000000..3c79bd1
--- /dev/null
+++ b/av1/encoder/segmentation.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_SEGMENTATION_H_
+#define VP10_ENCODER_SEGMENTATION_H_
+
+#include "av1/common/blockd.h"
+#include "av1/encoder/encoder.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_enable_segmentation(struct segmentation *seg);
+void vp10_disable_segmentation(struct segmentation *seg);
+
+void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+                             SEG_LVL_FEATURES feature_id);
+void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+                        SEG_LVL_FEATURES feature_id);
+
+// The values given for each segment can be either deltas (from the default
+// value chosen for the frame) or absolute values.
+//
+// Valid range for abs values is (0-127 for MB_LVL_ALT_Q), (0-63 for
+// SEGMENT_ALT_LF)
+// Valid range for delta values are (+/-127 for MB_LVL_ALT_Q), (+/-63 for
+// SEGMENT_ALT_LF)
+//
+// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
+// the absolute values given).
+void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+                           unsigned char abs_delta);
+
+void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
+
+void vp10_reset_segment_features(VP10_COMMON *cm);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_SEGMENTATION_H_
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
new file mode 100644
index 0000000..ab66250
--- /dev/null
+++ b/av1/encoder/speed_features.c
@@ -0,0 +1,635 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/speed_features.h"
+#include "av1/encoder/rdopt.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+
+// Mesh search patterns for various speed settings
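+// Each pattern entry pairs a search range with a sampling interval.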
+static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
+  { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 }
+};
+
+#define MAX_MESH_SPEED 5  // Max speed setting for mesh motion method
+static MESH_PATTERN
+    good_quality_mesh_patterns[MAX_MESH_SPEED + 1][MAX_MESH_STEP] = {
+      { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+      { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+      { { 64, 8 }, { 14, 2 }, { 7, 1 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+      { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+    };
+static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = {
+  50, 25, 15, 5, 1, 1
+};
+
+// Intra only frames, golden frames (except alt ref overlays) and
+// alt ref frames tend to be coded at a higher than ambient quality
+static int frame_is_boosted(const VP10_COMP *cpi) {
+  return frame_is_kf_gf_arf(cpi);
+}
+
+// Sets a partition size down to which the auto partition code will always
+// search (it can go lower), based on the image dimensions. The logic here
+// is that the extent to which ringing artefacts are offensive depends
+// partly on the screen area over which they propagate. Propagation is
+// limited by transform block size, but the screen area taken up by a given
+// block size will be larger for a small image format stretched to full
+// screen.
+static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
+  unsigned int screen_area = (cm->width * cm->height);
+
+  // Select block size based on image format size.
+  if (screen_area < 1280 * 720) {
+    // Formats smaller in area than 720P
+    return BLOCK_4X4;
+  } else if (screen_area < 1920 * 1080) {
+    // Format >= 720P and < 1080P
+    return BLOCK_8X8;
+  } else {
+    // Formats 1080P and up
+    return BLOCK_16X16;
+  }
+}
+
+static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
+                                                       SPEED_FEATURES *sf,
+                                                       int speed) {
+  VP10_COMMON *const cm = &cpi->common;
+
+  if (speed >= 1) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
+      sf->partition_search_breakout_dist_thr = (1 << 23);
+    } else {
+      sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
+      sf->partition_search_breakout_dist_thr = (1 << 21);
+    }
+  }
+
+  if (speed >= 2) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
+      sf->adaptive_pred_interp_filter = 0;
+      sf->partition_search_breakout_dist_thr = (1 << 24);
+      sf->partition_search_breakout_rate_thr = 120;
+    } else {
+      sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
+      sf->partition_search_breakout_dist_thr = (1 << 22);
+      sf->partition_search_breakout_rate_thr = 100;
+    }
+    sf->rd_auto_partition_min_limit = set_partition_min_limit(cm);
+  }
+
+  if (speed >= 3) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->disable_split_mask = DISABLE_ALL_SPLIT;
+      sf->schedule_mode_search = cm->base_qindex < 220 ? 1 : 0;
+      sf->partition_search_breakout_dist_thr = (1 << 25);
+      sf->partition_search_breakout_rate_thr = 200;
+    } else {
+      sf->max_intra_bsize = BLOCK_32X32;
+      sf->disable_split_mask = DISABLE_ALL_INTER_SPLIT;
+      sf->schedule_mode_search = cm->base_qindex < 175 ? 1 : 0;
+      sf->partition_search_breakout_dist_thr = (1 << 23);
+      sf->partition_search_breakout_rate_thr = 120;
+    }
+  }
+
+  // If this is a two-pass clip that fits the criteria for animated or
+  // graphics content, reset disable_split_mask for speeds 1-4. Do the same
+  // when the image edge is internal to the coded area.
+  if ((speed >= 1) && (cpi->oxcf.pass == 2) &&
+      ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
+       (vp10_internal_image_edge(cpi)))) {
+    sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
+  }
+
+  if (speed >= 4) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->partition_search_breakout_dist_thr = (1 << 26);
+    } else {
+      sf->partition_search_breakout_dist_thr = (1 << 24);
+    }
+    sf->disable_split_mask = DISABLE_ALL_SPLIT;
+  }
+}
+
+static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
+                                   SPEED_FEATURES *sf, int speed) {
+  const int boosted = frame_is_boosted(cpi);
+
+  if (speed >= 1) {
+    if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
+        vp10_internal_image_edge(cpi)) {
+      sf->use_square_partition_only = !frame_is_boosted(cpi);
+    } else {
+      sf->use_square_partition_only = !frame_is_intra_only(cm);
+    }
+
+    sf->less_rectangular_check = 1;
+
+    sf->use_rd_breakout = 1;
+    sf->adaptive_motion_search = 1;
+    sf->mv.auto_mv_step_size = 1;
+    sf->adaptive_rd_thresh = 1;
+    sf->mv.subpel_iters_per_step = 1;
+    sf->mode_skip_start = 10;
+    sf->adaptive_pred_interp_filter = 1;
+
+    sf->recode_loop = ALLOW_RECODE_KFARFGF;
+    sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V;
+    sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V;
+    sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V;
+    sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V;
+
+    sf->tx_size_search_breakout = 1;
+    sf->partition_search_breakout_rate_thr = 80;
+    sf->tx_type_search.prune_mode = PRUNE_ONE;
+    sf->tx_type_search.fast_intra_tx_type_search = 1;
+    sf->tx_type_search.fast_inter_tx_type_search = 1;
+    // Use transform domain distortion.
+    // Note var-tx expt always uses pixel domain distortion.
+    sf->use_transform_domain_distortion = 1;
+#if CONFIG_EXT_INTER
+    sf->disable_wedge_search_var_thresh = 100;
+    sf->fast_wedge_sign_estimate = 1;
+#endif  // CONFIG_EXT_INTER
+  }
+
+  if (speed >= 2) {
+    sf->tx_size_search_method =
+        frame_is_boosted(cpi) ? USE_FULL_RD : USE_LARGESTALL;
+    sf->mode_search_skip_flags =
+        (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+                                                FLAG_SKIP_INTRA_BESTINTER |
+                                                FLAG_SKIP_COMP_BESTINTRA |
+                                                FLAG_SKIP_INTRA_LOWVAR;
+    sf->disable_filter_search_var_thresh = 100;
+    sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+    sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
+    sf->allow_partition_search_skip = 1;
+    sf->use_upsampled_references = 0;
+    sf->adaptive_rd_thresh = 2;
+#if CONFIG_EXT_TX
+    sf->tx_type_search.prune_mode = PRUNE_TWO;
+#endif
+  }
+
+  if (speed >= 3) {
+    sf->use_square_partition_only = !frame_is_intra_only(cm);
+    sf->tx_size_search_method =
+        frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
+    sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED;
+    sf->adaptive_pred_interp_filter = 0;
+    sf->adaptive_mode_search = 1;
+    sf->cb_partition_search = !boosted;
+    sf->cb_pred_filter_search = 1;
+    sf->alt_ref_search_fp = 1;
+    sf->recode_loop = ALLOW_RECODE_KFMAXBW;
+    sf->adaptive_rd_thresh = 3;
+    sf->mode_skip_start = 6;
+    sf->intra_y_mode_mask[TX_32X32] = INTRA_DC;
+    sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC;
+    sf->adaptive_interp_filter_search = 1;
+  }
+
+  if (speed >= 4) {
+    sf->use_square_partition_only = 1;
+    sf->tx_size_search_method = USE_LARGESTALL;
+    sf->mv.search_method = BIGDIA;
+    sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED_MORE;
+    sf->adaptive_rd_thresh = 4;
+    if (cm->frame_type != KEY_FRAME)
+      sf->mode_search_skip_flags |= FLAG_EARLY_TERMINATE;
+    sf->disable_filter_search_var_thresh = 200;
+    sf->use_lp32x32fdct = 1;
+    sf->use_fast_coef_updates = ONE_LOOP_REDUCED;
+    sf->use_fast_coef_costing = 1;
+    sf->partition_search_breakout_rate_thr = 300;
+  }
+
+  if (speed >= 5) {
+    int i;
+    sf->optimize_coefficients = 0;
+    sf->mv.search_method = HEX;
+    sf->disable_filter_search_var_thresh = 500;
+    for (i = 0; i < TX_SIZES; ++i) {
+      sf->intra_y_mode_mask[i] = INTRA_DC;
+      sf->intra_uv_mode_mask[i] = INTRA_DC;
+    }
+    sf->partition_search_breakout_rate_thr = 500;
+    sf->mv.reduce_first_step_size = 1;
+    sf->simple_model_rd_from_var = 1;
+  }
+}
+
+static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
+                                                     SPEED_FEATURES *sf,
+                                                     int speed) {
+  VP10_COMMON *const cm = &cpi->common;
+  if (speed >= 1) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
+    } else {
+      sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
+    }
+  }
+
+  if (speed >= 2) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->disable_split_mask =
+          cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
+    } else {
+      sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
+    }
+  }
+
+  if (speed >= 5) {
+    if (VPXMIN(cm->width, cm->height) >= 720) {
+      sf->partition_search_breakout_dist_thr = (1 << 25);
+    } else {
+      sf->partition_search_breakout_dist_thr = (1 << 23);
+    }
+  }
+
+  if (speed >= 7) {
+    sf->encode_breakout_thresh =
+        (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
+  }
+}
+
+static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
+                                 vpx_tune_content content) {
+  VP10_COMMON *const cm = &cpi->common;
+  const int is_keyframe = cm->frame_type == KEY_FRAME;
+  const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
+  sf->static_segmentation = 0;
+  sf->adaptive_rd_thresh = 1;
+  sf->use_fast_coef_costing = 1;
+  sf->allow_exhaustive_searches = 0;
+  sf->exhaustive_searches_thresh = INT_MAX;
+  sf->use_upsampled_references = 0;
+#if CONFIG_EXT_INTER
+  sf->disable_wedge_search_var_thresh = 100;
+  sf->fast_wedge_sign_estimate = 1;
+#endif  // CONFIG_EXT_INTER
+
+  // Use transform domain distortion computation
+  // Note var-tx expt always uses pixel domain distortion.
+  sf->use_transform_domain_distortion = 1;
+
+  if (speed >= 1) {
+    sf->use_square_partition_only = !frame_is_intra_only(cm);
+    sf->less_rectangular_check = 1;
+    sf->tx_size_search_method =
+        frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
+
+    sf->use_rd_breakout = 1;
+
+    sf->adaptive_motion_search = 1;
+    sf->adaptive_pred_interp_filter = 1;
+    sf->mv.auto_mv_step_size = 1;
+    sf->adaptive_rd_thresh = 2;
+    sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V;
+    sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V;
+    sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V;
+  }
+
+  if (speed >= 2) {
+    sf->mode_search_skip_flags =
+        (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+                                                FLAG_SKIP_INTRA_BESTINTER |
+                                                FLAG_SKIP_COMP_BESTINTRA |
+                                                FLAG_SKIP_INTRA_LOWVAR;
+    sf->adaptive_pred_interp_filter = 2;
+    sf->disable_filter_search_var_thresh = 50;
+    sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
+    sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
+    sf->lf_motion_threshold = LOW_MOTION_THRESHOLD;
+    sf->adjust_partitioning_from_last_frame = 1;
+    sf->last_partitioning_redo_frequency = 3;
+    sf->use_lp32x32fdct = 1;
+    sf->mode_skip_start = 11;
+    sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V;
+  }
+
+  if (speed >= 3) {
+    sf->use_square_partition_only = 1;
+    sf->disable_filter_search_var_thresh = 100;
+    sf->use_uv_intra_rd_estimate = 1;
+    sf->mv.subpel_iters_per_step = 1;
+    sf->adaptive_rd_thresh = 4;
+    sf->mode_skip_start = 6;
+    sf->optimize_coefficients = 0;
+    sf->disable_split_mask = DISABLE_ALL_SPLIT;
+    sf->lpf_pick = LPF_PICK_FROM_Q;
+  }
+
+  if (speed >= 4) {
+    int i;
+    sf->last_partitioning_redo_frequency = 4;
+    sf->adaptive_rd_thresh = 5;
+    sf->use_fast_coef_costing = 0;
+    sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX;
+    sf->adjust_partitioning_from_last_frame =
+        cm->last_frame_type != cm->frame_type ||
+        (0 == (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
+    sf->mv.subpel_force_stop = 1;
+    for (i = 0; i < TX_SIZES; i++) {
+      sf->intra_y_mode_mask[i] = INTRA_DC_H_V;
+      sf->intra_uv_mode_mask[i] = INTRA_DC;
+    }
+    sf->intra_y_mode_mask[TX_32X32] = INTRA_DC;
+    sf->frame_parameter_update = 0;
+    sf->mv.search_method = FAST_HEX;
+
+    sf->inter_mode_mask[BLOCK_32X32] = INTER_NEAREST_NEAR_NEW;
+    sf->inter_mode_mask[BLOCK_32X64] = INTER_NEAREST;
+    sf->inter_mode_mask[BLOCK_64X32] = INTER_NEAREST;
+    sf->inter_mode_mask[BLOCK_64X64] = INTER_NEAREST;
+#if CONFIG_EXT_PARTITION
+    sf->inter_mode_mask[BLOCK_64X128] = INTER_NEAREST;
+    sf->inter_mode_mask[BLOCK_128X64] = INTER_NEAREST;
+    sf->inter_mode_mask[BLOCK_128X128] = INTER_NEAREST;
+#endif  // CONFIG_EXT_PARTITION
+    sf->max_intra_bsize = BLOCK_32X32;
+  }
+
+  if (speed >= 5) {
+    sf->auto_min_max_partition_size =
+        is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX;
+    sf->default_max_partition_size = BLOCK_32X32;
+    sf->default_min_partition_size = BLOCK_8X8;
+    sf->force_frame_boost =
+        is_keyframe ||
+        (frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1);
+    sf->max_delta_qindex = is_keyframe ? 20 : 15;
+    sf->partition_search_type = REFERENCE_PARTITION;
+    sf->inter_mode_mask[BLOCK_32X32] = INTER_NEAREST_NEW_ZERO;
+    sf->inter_mode_mask[BLOCK_32X64] = INTER_NEAREST_NEW_ZERO;
+    sf->inter_mode_mask[BLOCK_64X32] = INTER_NEAREST_NEW_ZERO;
+    sf->inter_mode_mask[BLOCK_64X64] = INTER_NEAREST_NEW_ZERO;
+#if CONFIG_EXT_PARTITION
+    sf->inter_mode_mask[BLOCK_64X128] = INTER_NEAREST_NEW_ZERO;
+    sf->inter_mode_mask[BLOCK_128X64] = INTER_NEAREST_NEW_ZERO;
+    sf->inter_mode_mask[BLOCK_128X128] = INTER_NEAREST_NEW_ZERO;
+#endif  // CONFIG_EXT_PARTITION
+    sf->adaptive_rd_thresh = 2;
+    // This feature is only enabled when partition search is disabled.
+    sf->reuse_inter_pred_sby = 1;
+    sf->partition_search_breakout_rate_thr = 200;
+    sf->coeff_prob_appx_step = 4;
+    sf->use_fast_coef_updates = is_keyframe ? TWO_LOOP : ONE_LOOP_REDUCED;
+    sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH;
+    sf->tx_size_search_method = is_keyframe ? USE_LARGESTALL : USE_TX_8X8;
+    sf->simple_model_rd_from_var = 1;
+
+    if (!is_keyframe) {
+      int i;
+      if (content == VPX_CONTENT_SCREEN) {
+        for (i = 0; i < BLOCK_SIZES; ++i)
+          sf->intra_y_mode_bsize_mask[i] = INTRA_DC_TM_H_V;
+      } else {
+        for (i = 0; i < BLOCK_SIZES; ++i)
+          if (i >= BLOCK_16X16)
+            sf->intra_y_mode_bsize_mask[i] = INTRA_DC;
+          else
+            // Use H and V intra modes for block sizes below 16X16.
+            sf->intra_y_mode_bsize_mask[i] = INTRA_DC_H_V;
+      }
+    }
+  }
+
+  if (speed >= 6) {
+    // Use variance based partitioning.
+    sf->partition_search_type = VAR_BASED_PARTITION;
+    sf->mv.search_method = NSTEP;
+    sf->mv.reduce_first_step_size = 1;
+  }
+
+  if (speed >= 7) {
+    sf->adaptive_rd_thresh = 3;
+    sf->mv.search_method = FAST_DIAMOND;
+    sf->mv.fullpel_search_step_param = 10;
+  }
+  if (speed >= 8) {
+    sf->adaptive_rd_thresh = 4;
+    sf->mv.subpel_force_stop = 2;
+    sf->lpf_pick = LPF_PICK_MINIMAL_LPF;
+  }
+}
+
+void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
+  SPEED_FEATURES *const sf = &cpi->sf;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  RD_OPT *const rd = &cpi->rd;
+  int i;
+
+  if (oxcf->mode == REALTIME) {
+    set_rt_speed_feature_framesize_dependent(cpi, sf, oxcf->speed);
+  } else if (oxcf->mode == GOOD) {
+    set_good_speed_feature_framesize_dependent(cpi, sf, oxcf->speed);
+  }
+
+  if (sf->disable_split_mask == DISABLE_ALL_SPLIT) {
+    sf->adaptive_pred_interp_filter = 0;
+  }
+
+  if (cpi->encode_breakout && oxcf->mode == REALTIME &&
+      sf->encode_breakout_thresh > cpi->encode_breakout) {
+    cpi->encode_breakout = sf->encode_breakout_thresh;
+  }
+
+  // Check for masked out split cases.
+  for (i = 0; i < MAX_REFS; ++i) {
+    if (sf->disable_split_mask & (1 << i)) {
+      rd->thresh_mult_sub8x8[i] = INT_MAX;
+    }
+  }
+}
+
+void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
+  SPEED_FEATURES *const sf = &cpi->sf;
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &cpi->td.mb;
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  int i;
+
+  // best quality defaults
+  sf->frame_parameter_update = 1;
+  sf->mv.search_method = NSTEP;
+  sf->recode_loop = ALLOW_RECODE;
+  sf->mv.subpel_search_method = SUBPEL_TREE;
+  sf->mv.subpel_iters_per_step = 2;
+  sf->mv.subpel_force_stop = 0;
+  sf->optimize_coefficients = !is_lossless_requested(&cpi->oxcf);
+  sf->mv.reduce_first_step_size = 0;
+  sf->coeff_prob_appx_step = 1;
+  sf->mv.auto_mv_step_size = 0;
+  sf->mv.fullpel_search_step_param = 6;
+  sf->comp_inter_joint_search_thresh = BLOCK_4X4;
+  sf->adaptive_rd_thresh = 0;
+  sf->tx_size_search_method = USE_FULL_RD;
+  sf->use_lp32x32fdct = 0;
+  sf->adaptive_motion_search = 0;
+  sf->adaptive_pred_interp_filter = 0;
+  sf->adaptive_mode_search = 0;
+  sf->cb_pred_filter_search = 0;
+  sf->cb_partition_search = 0;
+  sf->alt_ref_search_fp = 0;
+  sf->partition_search_type = SEARCH_PARTITION;
+  sf->tx_type_search.prune_mode = NO_PRUNE;
+  sf->tx_type_search.fast_intra_tx_type_search = 0;
+  sf->tx_type_search.fast_inter_tx_type_search = 0;
+  sf->less_rectangular_check = 0;
+  sf->use_square_partition_only = 0;
+  sf->auto_min_max_partition_size = NOT_IN_USE;
+  sf->rd_auto_partition_min_limit = BLOCK_4X4;
+  sf->default_max_partition_size = BLOCK_LARGEST;
+  sf->default_min_partition_size = BLOCK_4X4;
+  sf->adjust_partitioning_from_last_frame = 0;
+  sf->last_partitioning_redo_frequency = 4;
+  sf->disable_split_mask = 0;
+  sf->mode_search_skip_flags = 0;
+  sf->force_frame_boost = 0;
+  sf->max_delta_qindex = 0;
+  sf->disable_filter_search_var_thresh = 0;
+  sf->adaptive_interp_filter_search = 0;
+  sf->allow_partition_search_skip = 0;
+#if CONFIG_EXT_TILE
+  sf->use_upsampled_references = 0;
+#else
+  sf->use_upsampled_references = 1;
+#endif  // CONFIG_EXT_TILE
+#if CONFIG_EXT_INTER
+  sf->disable_wedge_search_var_thresh = 0;
+  sf->fast_wedge_sign_estimate = 0;
+#endif  // CONFIG_EXT_INTER
+
+  for (i = 0; i < TX_SIZES; i++) {
+    sf->intra_y_mode_mask[i] = INTRA_ALL;
+    sf->intra_uv_mode_mask[i] = INTRA_ALL;
+  }
+  sf->use_rd_breakout = 0;
+  sf->use_uv_intra_rd_estimate = 0;
+  sf->lpf_pick = LPF_PICK_FROM_FULL_IMAGE;
+  sf->use_fast_coef_updates = TWO_LOOP;
+  sf->use_fast_coef_costing = 0;
+  sf->mode_skip_start = MAX_MODES;  // Mode index at which mode skip mask set
+  sf->schedule_mode_search = 0;
+  for (i = 0; i < BLOCK_SIZES; ++i) sf->inter_mode_mask[i] = INTER_ALL;
+  sf->max_intra_bsize = BLOCK_LARGEST;
+  sf->reuse_inter_pred_sby = 0;
+  // This setting only takes effect when partition_search_type is set
+  // to FIXED_PARTITION.
+  sf->always_this_block_size = BLOCK_16X16;
+  sf->search_type_check_frequency = 50;
+  sf->encode_breakout_thresh = 0;
+  // Recode loop tolerance %.
+  sf->recode_tolerance = 25;
+  sf->default_interp_filter = SWITCHABLE;
+  sf->tx_size_search_breakout = 0;
+  sf->partition_search_breakout_dist_thr = 0;
+  sf->partition_search_breakout_rate_thr = 0;
+  sf->simple_model_rd_from_var = 0;
+
+// Set this at the appropriate speed levels
+#if CONFIG_EXT_TILE
+  sf->use_transform_domain_distortion = 1;
+#else
+  sf->use_transform_domain_distortion = 0;
+#endif  // CONFIG_EXT_TILE
+
+  if (oxcf->mode == REALTIME)
+    set_rt_speed_feature(cpi, sf, oxcf->speed, oxcf->content);
+  else if (oxcf->mode == GOOD)
+    set_good_speed_feature(cpi, cm, sf, oxcf->speed);
+
+  // sf->partition_search_breakout_dist_thr is set assuming max 64x64
+  // blocks. Normalise this if the blocks are bigger.
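+  // For example, with MAX_SB_SIZE_LOG2 == 7 (128x128 superblocks) the
+  // largest block has 4x the pixels of a 64x64 block, so the threshold is
+  // scaled by 4 (a shift of 2 * (7 - 6) = 2) below.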
+  if (MAX_SB_SIZE_LOG2 > 6) {
+    sf->partition_search_breakout_dist_thr <<= 2 * (MAX_SB_SIZE_LOG2 - 6);
+  }
+
+  cpi->full_search_sad = vp10_full_search_sad;
+  cpi->diamond_search_sad = vp10_diamond_search_sad;
+
+  sf->allow_exhaustive_searches = 1;
+  if (oxcf->mode == BEST) {
+    if (cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION)
+      sf->exhaustive_searches_thresh = (1 << 20);
+    else
+      sf->exhaustive_searches_thresh = (1 << 21);
+    sf->max_exaustive_pct = 100;
+    for (i = 0; i < MAX_MESH_STEP; ++i) {
+      sf->mesh_patterns[i].range = best_quality_mesh_pattern[i].range;
+      sf->mesh_patterns[i].interval = best_quality_mesh_pattern[i].interval;
+    }
+  } else {
+    int speed = (oxcf->speed > MAX_MESH_SPEED) ? MAX_MESH_SPEED : oxcf->speed;
+    if (cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION)
+      sf->exhaustive_searches_thresh = (1 << 22);
+    else
+      sf->exhaustive_searches_thresh = (1 << 23);
+    sf->max_exaustive_pct = good_quality_max_mesh_pct[speed];
+    if (speed > 0)
+      sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1;
+
+    for (i = 0; i < MAX_MESH_STEP; ++i) {
+      sf->mesh_patterns[i].range = good_quality_mesh_patterns[speed][i].range;
+      sf->mesh_patterns[i].interval =
+          good_quality_mesh_patterns[speed][i].interval;
+    }
+  }
+
+  // Slow quant, dct and trellis not worthwhile for first pass
+  // so make sure they are always turned off.
+  if (oxcf->pass == 1) sf->optimize_coefficients = 0;
+
+  // No recode for 1 pass.
+  if (oxcf->pass == 0) {
+    sf->recode_loop = DISALLOW_RECODE;
+    sf->optimize_coefficients = 0;
+  }
+
+  if (sf->mv.subpel_search_method == SUBPEL_TREE) {
+    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree;
+  } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) {
+    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned;
+  } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
+    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
+  } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
+    cpi->find_fractional_mv_step =
+        vp10_find_best_sub_pixel_tree_pruned_evenmore;
+  }
+
+#if !CONFIG_AOM_QM
+  x->optimize = sf->optimize_coefficients == 1 && oxcf->pass != 1;
+#else
+  // FIXME: trellis not very efficient for quantisation matrices
+  x->optimize = 0;
+#endif
+
+  x->min_partition_size = sf->default_min_partition_size;
+  x->max_partition_size = sf->default_max_partition_size;
+
+  if (!cpi->oxcf.frame_periodic_boost) {
+    sf->max_delta_qindex = 0;
+  }
+}
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
new file mode 100644
index 0000000..2457c5b
--- /dev/null
+++ b/av1/encoder/speed_features.h
@@ -0,0 +1,494 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_SPEED_FEATURES_H_
+#define VP10_ENCODER_SPEED_FEATURES_H_
+
+#include "av1/common/enums.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+  INTRA_ALL = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED) | (1 << D45_PRED) |
+              (1 << D135_PRED) | (1 << D117_PRED) | (1 << D153_PRED) |
+              (1 << D207_PRED) | (1 << D63_PRED) | (1 << TM_PRED),
+  INTRA_DC = (1 << DC_PRED),
+  INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
+  INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
+  INTRA_DC_TM_H_V =
+      (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED)
+};
+
+#if CONFIG_EXT_INTER
+enum {
+  INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV) |
+              (1 << NEWFROMNEARMV) | (1 << NEAREST_NEARESTMV) |
+              (1 << NEAR_NEARMV) | (1 << NEAREST_NEARMV) |
+              (1 << NEAR_NEARESTMV) | (1 << NEW_NEWMV) | (1 << NEAREST_NEWMV) |
+              (1 << NEAR_NEWMV) | (1 << NEW_NEARMV) | (1 << NEW_NEARESTMV) |
+              (1 << ZERO_ZEROMV),
+  INTER_NEAREST = (1 << NEARESTMV) | (1 << NEAREST_NEARESTMV) |
+                  (1 << NEAREST_NEARMV) | (1 << NEAR_NEARESTMV) |
+                  (1 << NEW_NEARESTMV) | (1 << NEAREST_NEWMV),
+  INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV) |
+                      (1 << NEAREST_NEARESTMV) | (1 << NEW_NEWMV) |
+                      (1 << NEAR_NEARESTMV) | (1 << NEAREST_NEARMV) |
+                      (1 << NEW_NEARESTMV) | (1 << NEAREST_NEWMV) |
+                      (1 << NEW_NEARMV) | (1 << NEAR_NEWMV),
+  INTER_NEAREST_ZERO = (1 << NEARESTMV) | (1 << ZEROMV) |
+                       (1 << NEAREST_NEARESTMV) | (1 << ZERO_ZEROMV) |
+                       (1 << NEAREST_NEARMV) | (1 << NEAR_NEARESTMV) |
+                       (1 << NEAREST_NEWMV) | (1 << NEW_NEARESTMV),
+  INTER_NEAREST_NEW_ZERO =
+      (1 << NEARESTMV) | (1 << ZEROMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV) |
+      (1 << NEAREST_NEARESTMV) | (1 << ZERO_ZEROMV) | (1 << NEW_NEWMV) |
+      (1 << NEAREST_NEARMV) | (1 << NEAR_NEARESTMV) | (1 << NEW_NEARESTMV) |
+      (1 << NEAREST_NEWMV) | (1 << NEW_NEARMV) | (1 << NEAR_NEWMV),
+  INTER_NEAREST_NEAR_NEW =
+      (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV) | (1 << NEWFROMNEARMV) |
+      (1 << NEAREST_NEARESTMV) | (1 << NEW_NEWMV) | (1 << NEAREST_NEARMV) |
+      (1 << NEAR_NEARESTMV) | (1 << NEW_NEARESTMV) | (1 << NEAREST_NEWMV) |
+      (1 << NEW_NEARMV) | (1 << NEAR_NEWMV) | (1 << NEAR_NEARMV),
+  INTER_NEAREST_NEAR_ZERO =
+      (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) |
+      (1 << NEAREST_NEARESTMV) | (1 << ZERO_ZEROMV) | (1 << NEAREST_NEARMV) |
+      (1 << NEAR_NEARESTMV) | (1 << NEAREST_NEWMV) | (1 << NEW_NEARESTMV) |
+      (1 << NEW_NEARMV) | (1 << NEAR_NEWMV) | (1 << NEAR_NEARMV),
+};
+#else
+enum {
+  INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV),
+  INTER_NEAREST = (1 << NEARESTMV),
+  INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV),
+  INTER_NEAREST_ZERO = (1 << NEARESTMV) | (1 << ZEROMV),
+  INTER_NEAREST_NEW_ZERO = (1 << NEARESTMV) | (1 << ZEROMV) | (1 << NEWMV),
+  INTER_NEAREST_NEAR_NEW = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV),
+  INTER_NEAREST_NEAR_ZERO = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV),
+};
+#endif  // CONFIG_EXT_INTER
+
+enum {
+  DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+                            (1 << THR_ALTR) | (1 << THR_GOLD) | (1 << THR_LAST),
+
+  DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
+
+  DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
+
+  LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+                              (1 << THR_ALTR) | (1 << THR_GOLD)
+};
+
+typedef enum {
+  DIAMOND = 0,
+  NSTEP = 1,
+  HEX = 2,
+  BIGDIA = 3,
+  SQUARE = 4,
+  FAST_HEX = 5,
+  FAST_DIAMOND = 6
+} SEARCH_METHODS;
+
+typedef enum {
+  // No recode.
+  DISALLOW_RECODE = 0,
+  // Allow recode for KF and when exceeding maximum frame bandwidth.
+  ALLOW_RECODE_KFMAXBW = 1,
+  // Allow recode only for KF/ARF/GF frames.
+  ALLOW_RECODE_KFARFGF = 2,
+  // Allow recode for all frames based on bitrate constraints.
+  ALLOW_RECODE = 3,
+} RECODE_LOOP_TYPE;
+
+typedef enum {
+  SUBPEL_TREE = 0,
+  SUBPEL_TREE_PRUNED = 1,           // Prunes 1/2-pel searches
+  SUBPEL_TREE_PRUNED_MORE = 2,      // Prunes 1/2-pel searches more aggressively
+  SUBPEL_TREE_PRUNED_EVENMORE = 3,  // Prunes 1/2- and 1/4-pel searches
+  // Other methods to come
+} SUBPEL_SEARCH_METHODS;
+
+typedef enum {
+  NO_MOTION_THRESHOLD = 0,
+  LOW_MOTION_THRESHOLD = 7
+} MOTION_THRESHOLD;
+
+typedef enum {
+  USE_FULL_RD = 0,
+  USE_LARGESTALL,
+  USE_TX_8X8
+} TX_SIZE_SEARCH_METHOD;
+
+typedef enum {
+  NOT_IN_USE = 0,
+  RELAXED_NEIGHBORING_MIN_MAX = 1,
+  STRICT_NEIGHBORING_MIN_MAX = 2
+} AUTO_MIN_MAX_MODE;
+
+typedef enum {
+  // Try the full image with different values.
+  LPF_PICK_FROM_FULL_IMAGE,
+  // Try a small portion of the image with different values.
+  LPF_PICK_FROM_SUBIMAGE,
+  // Estimate the level based on quantizer and frame type
+  LPF_PICK_FROM_Q,
+  // Pick 0 to disable LPF if LPF was enabled last frame
+  LPF_PICK_MINIMAL_LPF
+} LPF_PICK_METHOD;
+
+typedef enum {
+  // Terminate search early based on distortion so far compared to
+  // qp step, distortion in the neighborhood of the frame, etc.
+  FLAG_EARLY_TERMINATE = 1 << 0,
+
+  // Skips comp inter modes if the best so far is an intra mode.
+  FLAG_SKIP_COMP_BESTINTRA = 1 << 1,
+
+  // Skips oblique intra modes if the best so far is an inter mode.
+  FLAG_SKIP_INTRA_BESTINTER = 1 << 3,
+
+  // Skips oblique intra modes at angles 27, 63, 117, 153 if the best
+  // intra so far is not one of the neighboring directions.
+  FLAG_SKIP_INTRA_DIRMISMATCH = 1 << 4,
+
+  // Skips intra modes other than DC_PRED if the source variance is small
+  FLAG_SKIP_INTRA_LOWVAR = 1 << 5,
+} MODE_SEARCH_SKIP_LOGIC;
+
+typedef enum {
+  FLAG_SKIP_EIGHTTAP_REGULAR = 1 << EIGHTTAP_REGULAR,
+  FLAG_SKIP_EIGHTTAP_SMOOTH = 1 << EIGHTTAP_SMOOTH,
+  FLAG_SKIP_MULTITAP_SHARP = 1 << MULTITAP_SHARP,
+} INTERP_FILTER_MASK;
+
+typedef enum {
+  NO_PRUNE = 0,
+  // eliminates one tx type in vertical and horizontal direction
+  PRUNE_ONE = 1,
+#if CONFIG_EXT_TX
+  // eliminates two tx types in each direction
+  PRUNE_TWO = 2,
+#endif
+} TX_TYPE_PRUNE_MODE;
+
+typedef struct {
+  TX_TYPE_PRUNE_MODE prune_mode;
+  int fast_intra_tx_type_search;
+  int fast_inter_tx_type_search;
+} TX_TYPE_SEARCH;
+
+typedef enum {
+  // Search partitions using RD criterion
+  SEARCH_PARTITION,
+
+  // Always use a fixed size partition
+  FIXED_PARTITION,
+
+  REFERENCE_PARTITION,
+
+  // Use an arbitrary partitioning scheme based on source variance within
+  // a 64X64 SB
+  VAR_BASED_PARTITION,
+
+  // Use non-fixed partitions based on source variance
+  SOURCE_VAR_BASED_PARTITION
+} PARTITION_SEARCH_TYPE;
+
+typedef enum {
+  // Does a dry run to see if any of the contexts need to be updated or not,
+  // before the final run.
+  TWO_LOOP = 0,
+
+  // No dry run, also only half the coef contexts and bands are updated.
+  // The rest are not updated at all.
+  ONE_LOOP_REDUCED = 1
+} FAST_COEFF_UPDATE;
+
+typedef struct MV_SPEED_FEATURES {
+  // Motion search method (Diamond, NSTEP, Hex, Big Diamond, Square, etc).
+  SEARCH_METHODS search_method;
+
+  // This parameter controls which step in the n-step process we start at.
+  // It's changed adaptively based on circumstances.
+  int reduce_first_step_size;
+
+  // If this is set to 1, we limit the motion search range to 2 times the
+  // largest motion vector found in the last frame.
+  int auto_mv_step_size;
+
+  // Subpel search method: SUBPEL_TREE does a subpixel logarithmic search
+  // that keeps stepping at 1/2 pixel units until there is no further gain,
+  // then goes on to 1/4 pixel units and repeats the process, skipping many
+  // diagonals along the way. The PRUNED variants prune parts of this search.
+  SUBPEL_SEARCH_METHODS subpel_search_method;
+
+  // Maximum number of search iterations at each step (1/2, 1/4, ...) of the
+  // logarithmic subpel search before giving up.
+  int subpel_iters_per_step;
+
+  // Control when to stop subpel search
+  int subpel_force_stop;
+
+  // This variable sets the step_param used in full pel motion search.
+  int fullpel_search_step_param;
+} MV_SPEED_FEATURES;
+
+#define MAX_MESH_STEP 4
+
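+// One stage of an exhaustive mesh search: candidate points lie on a grid
+// spaced 'interval' pels apart and cover +/- 'range' around the current
+// best mv. (Descriptive note; the exact traversal is defined by the
+// encoder's mesh search.)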
+typedef struct MESH_PATTERN {
+  int range;
+  int interval;
+} MESH_PATTERN;
+
+typedef struct SPEED_FEATURES {
+  MV_SPEED_FEATURES mv;
+
+  // Frame level coding parameter update
+  int frame_parameter_update;
+
+  RECODE_LOOP_TYPE recode_loop;
+
+  // Trellis (dynamic programming) optimization of quantized values (+1, 0).
+  int optimize_coefficients;
+
+  // Always set to 0. If on it enables 0 cost background transmission
+  // (except for the initial transmission of the segmentation). The feature is
+  // disabled because the addition of very large block sizes makes the
+  // backgrounds very cheap to encode, and the segmentation we have
+  // adds overhead.
+  int static_segmentation;
+
+  // If 1 we iterate finding a best reference for 2 ref frames together - via
+  // a log search that iterates 4 times (check around mv for last for best
+  // error of combined predictor then check around mv for alt). If 0 we
+  // just use the best motion vector found for each frame by itself.
+  BLOCK_SIZE comp_inter_joint_search_thresh;
+
+  // This variable is used to cap the maximum number of times we skip testing a
+  // mode to be evaluated. A high value means we will be faster.
+  int adaptive_rd_thresh;
+
+  // Coefficient probability model approximation step size
+  int coeff_prob_appx_step;
+
+  // The threshold is to determine how slow the motion is; it is used when
+  // use_lastframe_partitioning is set to LAST_FRAME_PARTITION_LOW_MOTION
+  MOTION_THRESHOLD lf_motion_threshold;
+
+  // Determine which method we use to determine transform size. We can choose
+  // between options like full rd, largest for prediction size, largest
+  // for intra and model coefs for the rest.
+  TX_SIZE_SEARCH_METHOD tx_size_search_method;
+
+  // Low precision 32x32 fdct keeps everything in 16 bits and thus is less
+  // precise but significantly faster than the non lp version.
+  int use_lp32x32fdct;
+
+  // After looking at the first set of modes (set by index here), skip
+  // checking modes for reference frames that don't match the reference frame
+  // of the best so far.
+  int mode_skip_start;
+
+  PARTITION_SEARCH_TYPE partition_search_type;
+
+  TX_TYPE_SEARCH tx_type_search;
+
+  // Used if partition_search_type = FIXED_PARTITION
+  BLOCK_SIZE always_this_block_size;
+
+  // Skip rectangular partition test when partition type none gives better
+  // rd than partition type split.
+  int less_rectangular_check;
+
+  // Disable testing non-square partitions (e.g. 16x32).
+  int use_square_partition_only;
+
+  // Sets min and max partition sizes for this superblock based on the
+  // same superblock in last encoded frame, and the left and above neighbor.
+  AUTO_MIN_MAX_MODE auto_min_max_partition_size;
+  // Ensures the rd based auto partition search will always
+  // go down at least to the specified level.
+  BLOCK_SIZE rd_auto_partition_min_limit;
+
+  // Min and max partition size we enable (block_size) as per auto
+  // min max, but also used by adjust partitioning, and pick_partitioning.
+  BLOCK_SIZE default_min_partition_size;
+  BLOCK_SIZE default_max_partition_size;
+
+  // Whether or not we allow partitions one smaller or one greater than the last
+  // frame's partitioning. Only used if use_lastframe_partitioning is set.
+  int adjust_partitioning_from_last_frame;
+
+  // How frequently we redo the partitioning from scratch. Only used if
+  // use_lastframe_partitioning is set.
+  int last_partitioning_redo_frequency;
+
+  // Disables sub 8x8 block sizes in different scenarios: choices are to
+  // disable
+  // it always, to allow it for only Last frame and Intra, disable it for all
+  // inter modes or to enable it always.
+  int disable_split_mask;
+
+  // TODO(jingning): combine the related motion search speed features
+  // This allows us to use motion search at other sizes as a starting
+  // point for this motion search and limits the search range around it.
+  int adaptive_motion_search;
+
+  // Flag for allowing some use of exhaustive searches.
+  int allow_exhaustive_searches;
+
+  // Threshold for allowing exhaustive motion search.
+  int exhaustive_searches_thresh;
+
+  // Maximum usage of exhaustive mesh search (percentage) for a frame.
+  int max_exaustive_pct;
+
+  // Pattern to be used for any exhaustive mesh searches.
+  MESH_PATTERN mesh_patterns[MAX_MESH_STEP];
+
+  int schedule_mode_search;
+
+  // Allows sub 8x8 modes to use the prediction filter that was determined
+  // best for 8x8 mode. If set to 0 we always recheck all the filters for
+  // sizes less than 8x8, 1 means we check all filter modes if no 8x8 filter
+  // was selected, and 2 means we use 8 tap if no 8x8 filter mode was selected.
+  int adaptive_pred_interp_filter;
+
+  // Adaptive prediction mode search
+  int adaptive_mode_search;
+
+  // Chessboard pattern prediction filter type search
+  int cb_pred_filter_search;
+
+  int cb_partition_search;
+
+  int alt_ref_search_fp;
+
+  // Periodically use a finer quantizer in frames that run the variable
+  // block partition type search.
+  int force_frame_boost;
+
+  // Maximally allowed base quantization index fluctuation.
+  int max_delta_qindex;
+
+  // Implements various heuristics to skip searching modes.
+  // The heuristics selected are based on flags
+  // defined in the MODE_SEARCH_SKIP_LOGIC enum.
+  unsigned int mode_search_skip_flags;
+
+  // A source variance threshold below which filter search is disabled
+  // Choose a very large value (UINT_MAX) to use 8-tap always
+  unsigned int disable_filter_search_var_thresh;
+
+#if CONFIG_EXT_INTER
+  // A source variance threshold below which wedge search is disabled
+  unsigned int disable_wedge_search_var_thresh;
+
+  // Whether fast wedge sign estimate is used
+  int fast_wedge_sign_estimate;
+#endif  // CONFIG_EXT_INTER
+
+  // These bit masks allow you to enable or disable intra modes for each
+  // transform size separately.
+  int intra_y_mode_mask[TX_SIZES];
+  int intra_uv_mode_mask[TX_SIZES];
+
+  // These bit masks allow you to enable or disable intra modes for each
+  // prediction block size separately.
+  int intra_y_mode_bsize_mask[BLOCK_SIZES];
+
+  // This variable enables an early break out of mode testing if the model for
+  // rd built from the prediction signal indicates a value that's much
+  // higher than the best rd we've seen so far.
+  int use_rd_breakout;
+
+  // This enables us to use an estimate for intra rd based on dc mode rather
+  // than choosing an actual uv mode in the stage of encoding before the actual
+  // final encode.
+  int use_uv_intra_rd_estimate;
+
+  // This feature controls how the loop filter level is determined.
+  LPF_PICK_METHOD lpf_pick;
+
+  // This feature limits the number of coefficients updates we actually do
+  // by only looking at counts from 1/2 the bands.
+  FAST_COEFF_UPDATE use_fast_coef_updates;
+
+  // A binary mask indicating if NEARESTMV, NEARMV, ZEROMV, NEWMV
+  // modes are used in order from LSB to MSB for each BLOCK_SIZE.
+  int inter_mode_mask[BLOCK_SIZES];
+
+  // This feature controls whether we do the expensive context update and
+  // calculation in the rd coefficient costing loop.
+  int use_fast_coef_costing;
+
+  // This feature controls the tolerance vs target used in deciding whether to
+  // recode a frame. It has no meaning if recode is disabled.
+  int recode_tolerance;
+
+  // This variable controls the maximum block size where intra blocks can be
+  // used in inter frames.
+  // TODO(aconverse): Fold this into one of the other many mode skips
+  BLOCK_SIZE max_intra_bsize;
+
+  // The frequency that we check if SOURCE_VAR_BASED_PARTITION or
+  // FIXED_PARTITION search type should be used.
+  int search_type_check_frequency;
+
+  // When partition is pre-set, the inter prediction result from
+  // pick_inter_mode can be reused in the final block encoding process. It is
+  // enabled only for real-time mode speed 6.
+  int reuse_inter_pred_sby;
+
+  // This variable sets the encode_breakout threshold. Currently, it is only
+  // enabled in real time mode.
+  int encode_breakout_thresh;
+
+  // default interp filter choice
+  INTERP_FILTER default_interp_filter;
+
+  // Early termination in transform size search, which only applies while
+  // tx_size_search_method is USE_FULL_RD.
+  int tx_size_search_breakout;
+
+  // adaptive interp_filter search to allow skip of certain filter types.
+  int adaptive_interp_filter_search;
+
+  // mask for skip evaluation of certain interp_filter type.
+  INTERP_FILTER_MASK interp_filter_search_mask;
+
+  // Partition search early breakout thresholds.
+  int64_t partition_search_breakout_dist_thr;
+  int partition_search_breakout_rate_thr;
+
+  // Allow skipping partition search for still image frame
+  int allow_partition_search_skip;
+
+  // Fast approximation of vp10_model_rd_from_var_lapndz
+  int simple_model_rd_from_var;
+
+  // Do sub-pixel search in up-sampled reference frames
+  int use_upsampled_references;
+
+  // Whether to compute distortion in the image domain (slower but
+  // more accurate), or in the transform domain (faster but less accurate).
+  int use_transform_domain_distortion;
+} SPEED_FEATURES;
+
+struct VP10_COMP;
+
+void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi);
+void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_SPEED_FEATURES_H_
diff --git a/av1/encoder/subexp.c b/av1/encoder/subexp.c
new file mode 100644
index 0000000..d722654
--- /dev/null
+++ b/av1/encoder/subexp.c
@@ -0,0 +1,288 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "av1/encoder/bitwriter.h"
+
+#include "av1/common/common.h"
+#include "av1/common/entropy.h"
+#include "av1/encoder/cost.h"
+#include "av1/encoder/subexp.h"
+
+#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+
+static const uint8_t update_bits[255] = {
+  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  6,  6,  6,
+  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  8,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
+  8,  8,  8,  8,  8,  8,  8,  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+  11, 11, 11, 11, 11, 11, 11, 0,
+};
+
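+// Maps v onto a non-negative index measuring its distance from m, with
+// values closest to m getting the smallest indices. Illustrative example:
+// with m = 5, v = 5 -> 0, v = 4 -> 1, v = 6 -> 2, v = 3 -> 3, v = 7 -> 4;
+// any v > 2 * m is returned unchanged.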
+static int recenter_nonneg(int v, int m) {
+  if (v > (m << 1))
+    return v;
+  else if (v >= m)
+    return ((v - m) << 1);
+  else
+    return ((m - v) << 1) - 1;
+}
+
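+// Converts the signed probability delta (new vs old) into a small
+// non-negative index via recenter_nonneg(), then permutes it through
+// map_table so that every MODULUS_PARAM-th step size lands on the
+// shortest codewords (cf. update_bits above).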
+static int remap_prob(int v, int m) {
+  int i;
+  static const uint8_t map_table[MAX_PROB - 1] = {
+    // generated by:
+    //   map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM);
+    20,  21,  22,  23,  24,  25,  0,   26,  27,  28,  29,  30,  31,  32,  33,
+    34,  35,  36,  37,  1,   38,  39,  40,  41,  42,  43,  44,  45,  46,  47,
+    48,  49,  2,   50,  51,  52,  53,  54,  55,  56,  57,  58,  59,  60,  61,
+    3,   62,  63,  64,  65,  66,  67,  68,  69,  70,  71,  72,  73,  4,   74,
+    75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  5,   86,  87,  88,
+    89,  90,  91,  92,  93,  94,  95,  96,  97,  6,   98,  99,  100, 101, 102,
+    103, 104, 105, 106, 107, 108, 109, 7,   110, 111, 112, 113, 114, 115, 116,
+    117, 118, 119, 120, 121, 8,   122, 123, 124, 125, 126, 127, 128, 129, 130,
+    131, 132, 133, 9,   134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+    145, 10,  146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
+    158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12,  170, 171,
+    172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13,  182, 183, 184, 185,
+    186, 187, 188, 189, 190, 191, 192, 193, 14,  194, 195, 196, 197, 198, 199,
+    200, 201, 202, 203, 204, 205, 15,  206, 207, 208, 209, 210, 211, 212, 213,
+    214, 215, 216, 217, 16,  218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+    228, 229, 17,  230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+    18,  242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
+  };
+  v--;
+  m--;
+  if ((m << 1) <= MAX_PROB)
+    i = recenter_nonneg(v, m) - 1;
+  else
+    i = recenter_nonneg(MAX_PROB - 1 - v, MAX_PROB - 1 - m) - 1;
+
+  i = map_table[i];
+  return i;
+}
+
+static int prob_diff_update_cost(vpx_prob newp, vpx_prob oldp) {
+  int delp = remap_prob(newp, oldp);
+  return update_bits[delp] << VP10_PROB_COST_SHIFT;
+}
+
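+// Truncated uniform code over [0, 190): with l = 8 and m = 2^8 - 190 = 66,
+// values v < 66 are written in 7 bits and values 66 <= v < 190 in 8 bits.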
+static void encode_uniform(vp10_writer *w, int v) {
+  const int l = 8;
+  const int m = (1 << l) - 190;
+  if (v < m) {
+    vp10_write_literal(w, v, l - 1);
+  } else {
+    vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
+    vp10_write_literal(w, (v - m) & 1, 1);
+  }
+}
+
+static INLINE int write_bit_gte(vp10_writer *w, int word, int test) {
+  vp10_write_literal(w, word >= test, 1);
+  return word >= test;
+}
+
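+// Terminated subexponential code: "0" + 4 bits for word < 16, "10" + 4 bits
+// for 16 <= word < 32, "110" + 5 bits for 32 <= word < 64, and "111" +
+// encode_uniform(word - 64) otherwise. The resulting code lengths are
+// exactly the entries of update_bits[] above (5, 6, 8, then 10 or 11).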
+static void encode_term_subexp(vp10_writer *w, int word) {
+  if (!write_bit_gte(w, word, 16)) {
+    vp10_write_literal(w, word, 4);
+  } else if (!write_bit_gte(w, word, 32)) {
+    vp10_write_literal(w, word - 16, 4);
+  } else if (!write_bit_gte(w, word, 64)) {
+    vp10_write_literal(w, word - 32, 5);
+  } else {
+    encode_uniform(w, word - 64);
+  }
+}
+
+void vp10_write_prob_diff_update(vp10_writer *w, vpx_prob newp, vpx_prob oldp) {
+  const int delp = remap_prob(newp, oldp);
+  encode_term_subexp(w, delp);
+}
+
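+// Steps linearly from the suggested probability *bestp towards oldp and
+// returns the best savings found, where
+//   savings = cost(counts, oldp) - cost(counts, newp) - update signalling cost
+// A positive result means the update pays for its own overhead.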
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+                                         vpx_prob *bestp, vpx_prob upd) {
+  const int old_b = cost_branch256(ct, oldp);
+  int bestsavings = 0;
+  vpx_prob newp, bestnewp = oldp;
+  const int step = *bestp > oldp ? -1 : 1;
+
+  for (newp = *bestp; newp != oldp; newp += step) {
+    const int new_b = cost_branch256(ct, newp);
+    const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+    const int savings = old_b - new_b - update_b;
+    if (savings > bestsavings) {
+      bestsavings = savings;
+      bestnewp = newp;
+    }
+  }
+  *bestp = bestnewp;
+  return bestsavings;
+}
+
+int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+                                               const vpx_prob *oldp,
+                                               vpx_prob *bestp, vpx_prob upd,
+                                               int stepsize) {
+  int i, old_b, new_b, update_b, savings, bestsavings;
+  int newp;
+  const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
+  const int step = stepsize * step_sign;
+  vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+  vp10_model_to_full_probs(oldp, oldplist);
+  memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+  for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
+    old_b += cost_branch256(ct + 2 * i, oldplist[i]);
+  old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);
+
+  bestsavings = 0;
+  bestnewp = oldp[PIVOT_NODE];
+
+  assert(stepsize > 0);
+
+  for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
+    if (newp < 1 || newp > 255) continue;
+    newplist[PIVOT_NODE] = newp;
+    vp10_model_to_full_probs(newplist, newplist);
+    for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
+      new_b += cost_branch256(ct + 2 * i, newplist[i]);
+    new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
+    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+    savings = old_b - new_b - update_b;
+    if (savings > bestsavings) {
+      bestsavings = savings;
+      bestnewp = newp;
+    }
+  }
+
+  *bestp = bestnewp;
+  return bestsavings;
+}
+
+#if CONFIG_ENTROPY
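+// Total cost of coding the branch counts of n + 1 subframes when the
+// probability starts at p and is re-adapted between subframes with
+// vp10_merge_probs(), mirroring the in-stream probability adaptation.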
+static int get_cost(unsigned int ct[][2], vpx_prob p, int n) {
+  int i, p0 = p;
+  unsigned int total_ct[2] = { 0, 0 };
+  int cost = 0;
+
+  for (i = 0; i <= n; ++i) {
+    cost += cost_branch256(ct[i], p);
+    total_ct[0] += ct[i][0];
+    total_ct[1] += ct[i][1];
+    if (i < n)
+      p = vp10_merge_probs(p0, total_ct, COEF_COUNT_SAT_BITS,
+                           COEF_MAX_UPDATE_FACTOR_BITS);
+  }
+  return cost;
+}
+
+int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
+                                     vpx_prob *bestp, vpx_prob upd, int n) {
+  const int old_b = get_cost(ct, oldp, n);
+  int bestsavings = 0;
+  vpx_prob newp, bestnewp = oldp;
+  const int step = *bestp > oldp ? -1 : 1;
+
+  for (newp = *bestp; newp != oldp; newp += step) {
+    const int new_b = get_cost(ct, newp, n);
+    const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+    const int savings = old_b - new_b - update_b;
+    if (savings > bestsavings) {
+      bestsavings = savings;
+      bestnewp = newp;
+    }
+  }
+  *bestp = bestnewp;
+  return bestsavings;
+}
+
+int vp10_prob_update_search_model_subframe(
+    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
+    vpx_prob *bestp, vpx_prob upd, int stepsize, int n) {
+  int i, old_b, new_b, update_b, savings, bestsavings;
+  int newp;
+  const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1;
+  const int step = stepsize * step_sign;
+  vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
+  vp10_model_to_full_probs(oldp, oldplist);
+  memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES);
+  for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
+    old_b += get_cost(ct[i], oldplist[i], n);
+  old_b += get_cost(ct[PIVOT_NODE], oldplist[PIVOT_NODE], n);
+
+  bestsavings = 0;
+  bestnewp = oldp[PIVOT_NODE];
+
+  assert(stepsize > 0);
+
+  for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; newp += step) {
+    if (newp < 1 || newp > 255) continue;
+    newplist[PIVOT_NODE] = newp;
+    vp10_model_to_full_probs(newplist, newplist);
+    for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
+      new_b += get_cost(ct[i], newplist[i], n);
+    new_b += get_cost(ct[PIVOT_NODE], newplist[PIVOT_NODE], n);
+    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+    savings = old_b - new_b - update_b;
+    if (savings > bestsavings) {
+      bestsavings = savings;
+      bestnewp = newp;
+    }
+  }
+
+  *bestp = bestnewp;
+  return bestsavings;
+}
+#endif  // CONFIG_ENTROPY
+
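+// Conditionally signals a probability update: writes a one-bit flag (coded
+// with probability DIFF_UPDATE_PROB) and, only if the savings search above
+// finds a positive result, follows it with the subexponentially coded delta
+// and commits the new probability to *oldp.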
+void vp10_cond_prob_diff_update(vp10_writer *w, vpx_prob *oldp,
+                                const unsigned int ct[2]) {
+  const vpx_prob upd = DIFF_UPDATE_PROB;
+  vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+  const int savings =
+      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+  assert(newp >= 1);
+  if (savings > 0) {
+    vp10_write(w, 1, upd);
+    vp10_write_prob_diff_update(w, newp, *oldp);
+    *oldp = newp;
+  } else {
+    vp10_write(w, 0, upd);
+  }
+}
+
+int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
+                                       const unsigned int ct[2]) {
+  const vpx_prob upd = DIFF_UPDATE_PROB;
+  vpx_prob newp = get_binary_prob(ct[0], ct[1]);
+  const int savings =
+      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+  return savings;
+}
+
+void vp10_write_primitive_symmetric(vp10_writer *w, int word,
+                                    unsigned int abs_bits) {
+  if (word == 0) {
+    vp10_write_bit(w, 0);
+  } else {
+    const int x = abs(word);
+    const int s = word < 0;
+    vp10_write_bit(w, 1);
+    vp10_write_bit(w, s);
+    vp10_write_literal(w, x - 1, abs_bits);
+  }
+}
diff --git a/av1/encoder/subexp.h b/av1/encoder/subexp.h
new file mode 100644
index 0000000..82ce2e0
--- /dev/null
+++ b/av1/encoder/subexp.h
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_SUBEXP_H_
+#define VP10_ENCODER_SUBEXP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "aom_dsp/prob.h"
+
+struct vp10_writer;
+
+void vp10_write_prob_diff_update(struct vp10_writer *w, vpx_prob newp,
+                                 vpx_prob oldp);
+
+void vp10_cond_prob_diff_update(struct vp10_writer *w, vpx_prob *oldp,
+                                const unsigned int ct[2]);
+
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+                                         vpx_prob *bestp, vpx_prob upd);
+
+int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+                                               const vpx_prob *oldp,
+                                               vpx_prob *bestp, vpx_prob upd,
+                                               int stepsize);
+int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
+                                       const unsigned int ct[2]);
+
+#if CONFIG_ENTROPY
+int vp10_prob_update_search_subframe(unsigned int ct[][2], vpx_prob oldp,
+                                     vpx_prob *bestp, vpx_prob upd, int n);
+int vp10_prob_update_search_model_subframe(
+    unsigned int ct[ENTROPY_NODES][COEF_PROBS_BUFS][2], const vpx_prob *oldp,
+    vpx_prob *bestp, vpx_prob upd, int stepsize, int n);
+#endif  // CONFIG_ENTROPY
+
+//
+// mag_bits is the number of bits for the magnitude. The alphabet is of size
+// 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
+// indicate 0 or non-zero, mag_bits bits are used to indicate the magnitude,
+// and 1 more bit for the sign if non-zero.
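+// Example (illustrative): word = -3 with mag_bits = 4 is written as the
+// bit 1 (non-zero), the sign bit 1, then the literal 2 in 4 bits.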
+void vp10_write_primitive_symmetric(vp10_writer *w, int word,
+                                    unsigned int mag_bits);
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_SUBEXP_H_
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
new file mode 100644
index 0000000..32490cc
--- /dev/null
+++ b/av1/encoder/temporal_filter.c
@@ -0,0 +1,683 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <limits.h>
+
+#include "av1/common/alloccommon.h"
+#include "av1/common/onyxc_int.h"
+#include "av1/common/quant_common.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/odintrin.h"
+#include "av1/encoder/extend.h"
+#include "av1/encoder/firstpass.h"
+#include "av1/encoder/mcomp.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/quantize.h"
+#include "av1/encoder/ratectrl.h"
+#include "av1/encoder/segmentation.h"
+#include "av1/encoder/temporal_filter.h"
+#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_mem/vpx_mem.h"
+#include "aom_ports/mem.h"
+#include "aom_ports/vpx_timer.h"
+#include "aom_scale/vpx_scale.h"
+
+static void temporal_filter_predictors_mb_c(
+    MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
+    int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
+    uint8_t *pred, struct scale_factors *scale, int x, int y) {
+  const int which_mv = 0;
+  const MV mv = { mv_row, mv_col };
+  enum mv_precision mv_precision_uv;
+  int uv_stride;
+
+#if USE_TEMPORALFILTER_12TAP
+#if CONFIG_DUAL_FILTER
+  const INTERP_FILTER interp_filter[4] = { TEMPORALFILTER_12TAP,
+                                           TEMPORALFILTER_12TAP,
+                                           TEMPORALFILTER_12TAP,
+                                           TEMPORALFILTER_12TAP };
+#else
+  const INTERP_FILTER interp_filter = TEMPORALFILTER_12TAP;
+#endif
+  (void)xd;
+#else
+  const INTERP_FILTER interp_filter = xd->mi[0]->mbmi.interp_filter;
+#endif  // USE_TEMPORALFILTER_12TAP
+
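+  // For 4:2:0 content (8x8 chroma blocks) the chroma stride is half the
+  // luma stride and the chroma mv gets an extra bit of precision (Q4),
+  // since a chroma mv is half of the corresponding luma mv.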
+  if (uv_block_width == 8) {
+    uv_stride = (stride + 1) >> 1;
+    mv_precision_uv = MV_PRECISION_Q4;
+  } else {
+    uv_stride = stride;
+    mv_precision_uv = MV_PRECISION_Q3;
+  }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
+                                      scale, 16, 16, which_mv, interp_filter,
+                                      MV_PRECISION_Q3, x, y, xd->bd);
+
+    vp10_highbd_build_inter_predictor(
+        u_mb_ptr, uv_stride, &pred[256], uv_block_width, &mv, scale,
+        uv_block_width, uv_block_height, which_mv, interp_filter,
+        mv_precision_uv, x, y, xd->bd);
+
+    vp10_highbd_build_inter_predictor(
+        v_mb_ptr, uv_stride, &pred[512], uv_block_width, &mv, scale,
+        uv_block_width, uv_block_height, which_mv, interp_filter,
+        mv_precision_uv, x, y, xd->bd);
+    return;
+  }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+                             which_mv, interp_filter, MV_PRECISION_Q3, x, y);
+
+  vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+                             &mv, scale, uv_block_width, uv_block_height,
+                             which_mv, interp_filter, mv_precision_uv, x, y);
+
+  vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+                             &mv, scale, uv_block_width, uv_block_height,
+                             which_mv, interp_filter, mv_precision_uv, x, y);
+}
+
+void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+                                  uint8_t *frame2, unsigned int block_width,
+                                  unsigned int block_height, int strength,
+                                  int filter_weight, unsigned int *accumulator,
+                                  uint16_t *count) {
+  unsigned int i, j, k;
+  int modifier;
+  int byte = 0;
+  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
+
+  for (i = 0, k = 0; i < block_height; i++) {
+    for (j = 0; j < block_width; j++, k++) {
+      int pixel_value = *frame2;
+
+      // non-local mean approach
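+      // Weight this sample by how well its 3x3 neighbourhood in the source
+      // matches the corresponding neighbourhood of the prediction:
+      //   weight = (16 - min(16, (3 * mean_sse + rounding) >> strength))
+      //            * filter_weight
+      // so close matches contribute strongly and poor matches are dropped.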
+      int diff_sse[9] = { 0 };
+      int idx, idy, index = 0;
+
+      for (idy = -1; idy <= 1; ++idy) {
+        for (idx = -1; idx <= 1; ++idx) {
+          int row = i + idy;
+          int col = j + idx;
+
+          if (row >= 0 && row < (int)block_height && col >= 0 &&
+              col < (int)block_width) {
+            int diff = frame1[byte + idy * (int)stride + idx] -
+                       frame2[idy * (int)block_width + idx];
+            diff_sse[index] = diff * diff;
+            ++index;
+          }
+        }
+      }
+
+      assert(index > 0);
+
+      modifier = 0;
+      for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
+
+      modifier *= 3;
+      modifier /= index;
+
+      ++frame2;
+
+      modifier += rounding;
+      modifier >>= strength;
+
+      if (modifier > 16) modifier = 16;
+
+      modifier = 16 - modifier;
+      modifier *= filter_weight;
+
+      count[k] += modifier;
+      accumulator[k] += modifier * pixel_value;
+
+      byte++;
+    }
+
+    byte += stride - block_width;
+  }
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void vp10_highbd_temporal_filter_apply_c(
+    uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
+    unsigned int block_width, unsigned int block_height, int strength,
+    int filter_weight, unsigned int *accumulator, uint16_t *count) {
+  uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
+  uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
+  unsigned int i, j, k;
+  int modifier;
+  int byte = 0;
+  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
+
+  for (i = 0, k = 0; i < block_height; i++) {
+    for (j = 0; j < block_width; j++, k++) {
+      int pixel_value = *frame2;
+
+      // non-local mean approach
+      int diff_sse[9] = { 0 };
+      int idx, idy, index = 0;
+
+      for (idy = -1; idy <= 1; ++idy) {
+        for (idx = -1; idx <= 1; ++idx) {
+          int row = i + idy;
+          int col = j + idx;
+
+          if (row >= 0 && row < (int)block_height && col >= 0 &&
+              col < (int)block_width) {
+            int diff = frame1[byte + idy * (int)stride + idx] -
+                       frame2[idy * (int)block_width + idx];
+            diff_sse[index] = diff * diff;
+            ++index;
+          }
+        }
+      }
+
+      assert(index > 0);
+
+      modifier = 0;
+      for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx];
+
+      modifier *= 3;
+      modifier /= index;
+
+      ++frame2;
+
+      modifier += rounding;
+      modifier >>= strength;
+
+      if (modifier > 16) modifier = 16;
+
+      modifier = 16 - modifier;
+      modifier *= filter_weight;
+
+      count[k] += modifier;
+      accumulator[k] += modifier * pixel_value;
+
+      byte++;
+    }
+
+    byte += stride - block_width;
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
+                                              uint8_t *arf_frame_buf,
+                                              uint8_t *frame_ptr_buf,
+                                              int stride) {
+  MACROBLOCK *const x = &cpi->td.mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
+  int step_param;
+  int sadpb = x->sadperbit16;
+  int bestsme = INT_MAX;
+  int distortion;
+  unsigned int sse;
+  int cost_list[5];
+
+  MV best_ref_mv1 = { 0, 0 };
+  MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
+
+  // Save input state
+  struct buf_2d src = x->plane[0].src;
+  struct buf_2d pre = xd->plane[0].pre[0];
+
+  best_ref_mv1_full.col = best_ref_mv1.col >> 3;
+  best_ref_mv1_full.row = best_ref_mv1.row >> 3;
+
+  // Setup frame pointers
+  x->plane[0].src.buf = arf_frame_buf;
+  x->plane[0].src.stride = stride;
+  xd->plane[0].pre[0].buf = frame_ptr_buf;
+  xd->plane[0].pre[0].stride = stride;
+
+  step_param = mv_sf->reduce_first_step_size;
+  step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
+
+#if CONFIG_REF_MV
+  x->mvcost = x->mv_cost_stack[0];
+  x->nmvjointcost = x->nmv_vec_cost[0];
+  x->mvsadcost = x->mvcost;
+  x->nmvjointsadcost = x->nmvjointcost;
+#endif
+
+  // Ignore mv costing by sending NULL pointer instead of cost arrays
+  vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
+                  cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
+                  &best_ref_mv1);
+
+  // Ignore mv costing by sending NULL pointer instead of cost array
+  bestsme = cpi->find_fractional_mv_step(
+      x, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
+      &cpi->fn_ptr[BLOCK_16X16], 0, mv_sf->subpel_iters_per_step,
+      cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, 0,
+      0);
+
+  x->e_mbd.mi[0]->bmi[0].as_mv[0] = x->best_mv;
+
+  // Restore input state
+  x->plane[0].src = src;
+  xd->plane[0].pre[0] = pre;
+
+  return bestsme;
+}
+
+static void temporal_filter_iterate_c(VP10_COMP *cpi,
+                                      YV12_BUFFER_CONFIG **frames,
+                                      int frame_count, int alt_ref_index,
+                                      int strength,
+                                      struct scale_factors *scale) {
+  int byte;
+  int frame;
+  int mb_col, mb_row;
+  unsigned int filter_weight;
+  int mb_cols = (frames[alt_ref_index]->y_crop_width + 15) >> 4;
+  int mb_rows = (frames[alt_ref_index]->y_crop_height + 15) >> 4;
+  int mb_y_offset = 0;
+  int mb_uv_offset = 0;
+  DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 * 3]);
+  DECLARE_ALIGNED(16, uint16_t, count[16 * 16 * 3]);
+  MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
+  YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
+  uint8_t *dst1, *dst2;
+#if CONFIG_VP9_HIGHBITDEPTH
+  DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
+  DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
+  uint8_t *predictor;
+#else
+  DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
+#endif
+  const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
+  const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
+
+  // Save input state
+  uint8_t *input_buffer[MAX_MB_PLANE];
+  int i;
+#if CONFIG_VP9_HIGHBITDEPTH
+  if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+    predictor = CONVERT_TO_BYTEPTR(predictor16);
+  } else {
+    predictor = predictor8;
+  }
+#endif
+
+  for (i = 0; i < MAX_MB_PLANE; i++) input_buffer[i] = mbd->plane[i].pre[0].buf;
+
+  for (mb_row = 0; mb_row < mb_rows; mb_row++) {
+    // Source frames are extended to 16 pixels. This is different from
+    //  L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS).
+    // A 6/8 tap filter is used for motion search. This requires 2 pixels
+    //  before and 3 pixels after. So the largest Y mv on a border would
+    //  then be 16 - VPX_INTERP_EXTEND. The UV blocks are half the size of
+    //  the Y and therefore only extended by 8. The largest mv that a UV
+    //  block can support is 8 - VPX_INTERP_EXTEND. Since a UV mv is half of
+    //  a Y mv, the Y limit corresponds to (16 - VPX_INTERP_EXTEND) >> 1 in
+    //  UV units, which is greater than 8 - VPX_INTERP_EXTEND.
+    // To keep the mv in play for both Y and UV planes, the max that it
+    //  can be on a border is therefore 16 - (2 * VPX_INTERP_EXTEND + 1).
+    cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+    cpi->td.mb.mv_row_max =
+        ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+
+    for (mb_col = 0; mb_col < mb_cols; mb_col++) {
+      int i, j, k;
+      int stride;
+
+      memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
+      memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
+
+      cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VPX_INTERP_EXTEND));
+      cpi->td.mb.mv_col_max =
+          ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
+
+      for (frame = 0; frame < frame_count; frame++) {
+        const int thresh_low = 10000;
+        const int thresh_high = 20000;
+
+        if (frames[frame] == NULL) continue;
+
+        mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
+        mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
+
+        if (frame == alt_ref_index) {
+          filter_weight = 2;
+        } else {
+          // Find best match in this frame by MC
+          int err = temporal_filter_find_matching_mb_c(
+              cpi, frames[alt_ref_index]->y_buffer + mb_y_offset,
+              frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride);
+
+          // Assign a higher weight to the matching MB if its error
+          // score is lower. If MC is not applied, the default behavior
+          // is to weight all MBs equally.
+          filter_weight = err < thresh_low ? 2 : err < thresh_high ? 1 : 0;
+        }
+
+        if (filter_weight != 0) {
+          // Construct the predictors
+          temporal_filter_predictors_mb_c(
+              mbd, frames[frame]->y_buffer + mb_y_offset,
+              frames[frame]->u_buffer + mb_uv_offset,
+              frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
+              mb_uv_width, mb_uv_height, mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
+              mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
+              mb_col * 16, mb_row * 16);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+          if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+            int adj_strength = strength + 2 * (mbd->bd - 8);
+            // Apply the filter (YUV)
+            vp10_highbd_temporal_filter_apply(
+                f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
+                adj_strength, filter_weight, accumulator, count);
+            vp10_highbd_temporal_filter_apply(
+                f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+                mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+                accumulator + 256, count + 256);
+            vp10_highbd_temporal_filter_apply(
+                f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+                mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+                accumulator + 512, count + 512);
+          } else {
+            // Apply the filter (YUV)
+            vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+                                         predictor, 16, 16, strength,
+                                         filter_weight, accumulator, count);
+            vp10_temporal_filter_apply_c(
+                f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+                mb_uv_width, mb_uv_height, strength, filter_weight,
+                accumulator + 256, count + 256);
+            vp10_temporal_filter_apply_c(
+                f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+                mb_uv_width, mb_uv_height, strength, filter_weight,
+                accumulator + 512, count + 512);
+          }
+#else
+          // Apply the filter (YUV)
+          vp10_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride,
+                                       predictor, 16, 16, strength,
+                                       filter_weight, accumulator, count);
+          vp10_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride,
+                                       predictor + 256, mb_uv_width,
+                                       mb_uv_height, strength, filter_weight,
+                                       accumulator + 256, count + 256);
+          vp10_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride,
+                                       predictor + 512, mb_uv_width,
+                                       mb_uv_height, strength, filter_weight,
+                                       accumulator + 512, count + 512);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+        }
+      }
+
+#if CONFIG_VP9_HIGHBITDEPTH
+      if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+        uint16_t *dst1_16;
+        uint16_t *dst2_16;
+        // Normalize filter output to produce AltRef frame
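+        // Each output pixel is the rounded weighted average:
+        // (accumulator + count / 2) / count.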
+        dst1 = cpi->alt_ref_buffer.y_buffer;
+        dst1_16 = CONVERT_TO_SHORTPTR(dst1);
+        stride = cpi->alt_ref_buffer.y_stride;
+        byte = mb_y_offset;
+        for (i = 0, k = 0; i < 16; i++) {
+          for (j = 0; j < 16; j++, k++) {
+            dst1_16[byte] =
+                (uint16_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+            // move to next pixel
+            byte++;
+          }
+
+          byte += stride - 16;
+        }
+
+        dst1 = cpi->alt_ref_buffer.u_buffer;
+        dst2 = cpi->alt_ref_buffer.v_buffer;
+        dst1_16 = CONVERT_TO_SHORTPTR(dst1);
+        dst2_16 = CONVERT_TO_SHORTPTR(dst2);
+        stride = cpi->alt_ref_buffer.uv_stride;
+        byte = mb_uv_offset;
+        for (i = 0, k = 256; i < mb_uv_height; i++) {
+          for (j = 0; j < mb_uv_width; j++, k++) {
+            int m = k + 256;
+
+            // U
+            dst1_16[byte] =
+                (uint16_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+            // V
+            dst2_16[byte] =
+                (uint16_t)OD_DIVU(accumulator[m] + (count[m] >> 1), count[m]);
+
+            // move to next pixel
+            byte++;
+          }
+
+          byte += stride - mb_uv_width;
+        }
+      } else {
+        // Normalize filter output to produce AltRef frame
+        dst1 = cpi->alt_ref_buffer.y_buffer;
+        stride = cpi->alt_ref_buffer.y_stride;
+        byte = mb_y_offset;
+        for (i = 0, k = 0; i < 16; i++) {
+          for (j = 0; j < 16; j++, k++) {
+            dst1[byte] =
+                (uint8_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+            // move to next pixel
+            byte++;
+          }
+          byte += stride - 16;
+        }
+
+        dst1 = cpi->alt_ref_buffer.u_buffer;
+        dst2 = cpi->alt_ref_buffer.v_buffer;
+        stride = cpi->alt_ref_buffer.uv_stride;
+        byte = mb_uv_offset;
+        for (i = 0, k = 256; i < mb_uv_height; i++) {
+          for (j = 0; j < mb_uv_width; j++, k++) {
+            int m = k + 256;
+
+            // U
+            dst1[byte] =
+                (uint8_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+            // V
+            dst2[byte] =
+                (uint8_t)OD_DIVU(accumulator[m] + (count[m] >> 1), count[m]);
+
+            // move to next pixel
+            byte++;
+          }
+          byte += stride - mb_uv_width;
+        }
+      }
+#else
+      // Normalize filter output to produce AltRef frame
+      dst1 = cpi->alt_ref_buffer.y_buffer;
+      stride = cpi->alt_ref_buffer.y_stride;
+      byte = mb_y_offset;
+      for (i = 0, k = 0; i < 16; i++) {
+        for (j = 0; j < 16; j++, k++) {
+          dst1[byte] =
+              (uint8_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+          // move to next pixel
+          byte++;
+        }
+        byte += stride - 16;
+      }
+
+      dst1 = cpi->alt_ref_buffer.u_buffer;
+      dst2 = cpi->alt_ref_buffer.v_buffer;
+      stride = cpi->alt_ref_buffer.uv_stride;
+      byte = mb_uv_offset;
+      for (i = 0, k = 256; i < mb_uv_height; i++) {
+        for (j = 0; j < mb_uv_width; j++, k++) {
+          int m = k + 256;
+
+          // U
+          dst1[byte] =
+              (uint8_t)OD_DIVU(accumulator[k] + (count[k] >> 1), count[k]);
+
+          // V
+          dst2[byte] =
+              (uint8_t)OD_DIVU(accumulator[m] + (count[m] >> 1), count[m]);
+
+          // move to next pixel
+          byte++;
+        }
+        byte += stride - mb_uv_width;
+      }
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+      mb_y_offset += 16;
+      mb_uv_offset += mb_uv_width;
+    }
+    mb_y_offset += 16 * (f->y_stride - mb_cols);
+    mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols;
+  }
+
+  // Restore input state
+  for (i = 0; i < MAX_MB_PLANE; i++) mbd->plane[i].pre[0].buf = input_buffer[i];
+}
+
+// Apply buffer limits and context-specific adjustments to the arnr filter.
+static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
+                               int *arnr_frames, int *arnr_strength) {
+  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const int frames_after_arf =
+      vp10_lookahead_depth(cpi->lookahead) - distance - 1;
+  int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
+  int frames_bwd;
+  int q, frames, strength;
+
+  // Define the forward and backwards filter limits for this arnr group.
+  if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+  if (frames_fwd > distance) frames_fwd = distance;
+
+  frames_bwd = frames_fwd;
+
+  // For an even-length filter there is one more frame backward
+  // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
+  if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
+
+  // Set the baseline active filter size.
+  frames = frames_bwd + 1 + frames_fwd;
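+  // Worked example (illustrative): with oxcf->arnr_max_frames = 7,
+  // distance = 3, and at least 3 frames left in the lookahead after the
+  // ARF, frames_fwd = 3 and frames_bwd = 3, giving the 7-tap bbbAfff
+  // filter shown above.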
+
+  // Adjust the strength based on active max q.
+  if (cpi->common.current_video_frame > 1)
+    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+                                       cpi->common.bit_depth));
+  else
+    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+                                       cpi->common.bit_depth));
+  if (q > 16) {
+    strength = oxcf->arnr_strength;
+  } else {
+    strength = oxcf->arnr_strength - ((16 - q) / 2);
+    if (strength < 0) strength = 0;
+  }
+
+  // Adjust number of frames in filter and strength based on gf boost level.
+  if (frames > group_boost / 150) {
+    frames = group_boost / 150;
+    frames += !(frames & 1);
+  }
+
+  if (strength > group_boost / 300) {
+    strength = group_boost / 300;
+  }
+
+  // Adjustments for second level arf in multi arf case.
+  if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed) {
+    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+    if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) {
+      strength >>= 1;
+    }
+  }
+
+  *arnr_frames = frames;
+  *arnr_strength = strength;
+}
+
+void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  int frame;
+  int frames_to_blur;
+  int start_frame;
+  int strength;
+  int frames_to_blur_backward;
+  int frames_to_blur_forward;
+  struct scale_factors sf;
+  YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
+#if CONFIG_EXT_REFS
+  const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+#endif
+
+  // Apply context-specific adjustments to the arnr filter parameters.
+  adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
+// TODO(weitinglin): Currently, we force the filtering strength of extra
+//                   ARFs to be zero. We should investigate in which cases
+//                   it is more beneficial to use non-zero strength
+//                   filtering.
+#if CONFIG_EXT_REFS
+  if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW) {
+    strength = 0;
+    frames_to_blur = 1;
+  }
+#endif
+
+#if CONFIG_EXT_REFS
+  if (strength == 0 && frames_to_blur == 1) {
+    cpi->is_arf_filter_off[gf_group->arf_update_idx[gf_group->index]] = 1;
+  } else {
+    cpi->is_arf_filter_off[gf_group->arf_update_idx[gf_group->index]] = 0;
+  }
+#endif
+
+  frames_to_blur_backward = (frames_to_blur / 2);
+  frames_to_blur_forward = ((frames_to_blur - 1) / 2);
+  start_frame = distance + frames_to_blur_forward;
+
+  // Set up frame pointers; a NULL indicates the frame is not included in
+  // the filter.
+  for (frame = 0; frame < frames_to_blur; ++frame) {
+    const int which_buffer = start_frame - frame;
+    struct lookahead_entry *buf =
+        vp10_lookahead_peek(cpi->lookahead, which_buffer);
+    frames[frames_to_blur - 1 - frame] = &buf->img;
+  }
+
+  if (frames_to_blur > 0) {
+// Set up scaling factors. Scaling on each of the arnr frames is not
+// supported.
+// The ARF is produced at the native frame size and resized when coded.
+#if CONFIG_VP9_HIGHBITDEPTH
+    vp10_setup_scale_factors_for_frame(
+        &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+        frames[0]->y_crop_width, frames[0]->y_crop_height,
+        cpi->common.use_highbitdepth);
+#else
+    vp10_setup_scale_factors_for_frame(
+        &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+        frames[0]->y_crop_width, frames[0]->y_crop_height);
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+  }
+
+  temporal_filter_iterate_c(cpi, frames, frames_to_blur,
+                            frames_to_blur_backward, strength, &sf);
+}
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
new file mode 100644
index 0000000..ce5291a
--- /dev/null
+++ b/av1/encoder/temporal_filter.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_
+#define VP10_ENCODER_TEMPORAL_FILTER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_temporal_filter(VP10_COMP *cpi, int distance);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_TEMPORAL_FILTER_H_
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
new file mode 100644
index 0000000..c841fa6
--- /dev/null
+++ b/av1/encoder/tokenize.c
@@ -0,0 +1,737 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "aom_mem/vpx_mem.h"
+
+#include "av1/common/entropy.h"
+#include "av1/common/pred_common.h"
+#include "av1/common/scan.h"
+#include "av1/common/seg_common.h"
+
+#include "av1/encoder/cost.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/tokenize.h"
+
+static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
+  { 9, 63 }, { 9, 61 }, { 9, 59 }, { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 },
+  { 9, 49 }, { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, { 9, 37 },
+  { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, { 9, 27 }, { 9, 25 }, { 9, 23 },
+  { 9, 21 }, { 9, 19 }, { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 },
+  { 9, 7 },  { 9, 5 },  { 9, 3 },  { 9, 1 },  { 8, 31 }, { 8, 29 }, { 8, 27 },
+  { 8, 25 }, { 8, 23 }, { 8, 21 }, { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 },
+  { 8, 11 }, { 8, 9 },  { 8, 7 },  { 8, 5 },  { 8, 3 },  { 8, 1 },  { 7, 15 },
+  { 7, 13 }, { 7, 11 }, { 7, 9 },  { 7, 7 },  { 7, 5 },  { 7, 3 },  { 7, 1 },
+  { 6, 7 },  { 6, 5 },  { 6, 3 },  { 6, 1 },  { 5, 3 },  { 5, 1 },  { 4, 1 },
+  { 3, 1 },  { 2, 1 },  { 1, 1 },  { 0, 0 },  { 1, 0 },  { 2, 0 },  { 3, 0 },
+  { 4, 0 },  { 5, 0 },  { 5, 2 },  { 6, 0 },  { 6, 2 },  { 6, 4 },  { 6, 6 },
+  { 7, 0 },  { 7, 2 },  { 7, 4 },  { 7, 6 },  { 7, 8 },  { 7, 10 }, { 7, 12 },
+  { 7, 14 }, { 8, 0 },  { 8, 2 },  { 8, 4 },  { 8, 6 },  { 8, 8 },  { 8, 10 },
+  { 8, 12 }, { 8, 14 }, { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 },
+  { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 },  { 9, 2 },  { 9, 4 },  { 9, 6 },
+  { 9, 8 },  { 9, 10 }, { 9, 12 }, { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 },
+  { 9, 22 }, { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, { 9, 34 },
+  { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
+  { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
+};
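+// The exported pointer below is offset to the middle entry ({ 0, 0 }) of the
+// table above, so that it can be indexed directly with signed coefficient
+// values v with |v| < CAT6_MIN_VAL.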
+const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+    dct_cat_lt_10_value_tokens +
+    (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
+        2;
+// The corresponding costs of the extra bits for the tokens in the above
+// table are stored in the table below. The values are obtained by looking
+// up the entry for the given extra bits in the cost table of the
+// corresponding token (as defined in the cost element of vp10_extra_bits),
+// e.g. {9, 63} maps to cat5_cost[63 >> 1] and {1, 1} maps to
+// sign_cost[1 >> 1].
+static const int dct_cat_lt_10_value_cost[] = {
+  3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531, 3432, 3409, 3363, 3340, 3282,
+  3259, 3213, 3190, 3136, 3113, 3067, 3044, 2986, 2963, 2917, 2894, 2795, 2772,
+  2726, 2703, 2645, 2622, 2576, 2553, 3197, 3116, 3058, 2977, 2881, 2800, 2742,
+  2661, 2615, 2534, 2476, 2395, 2299, 2218, 2160, 2079, 2566, 2427, 2334, 2195,
+  2023, 1884, 1791, 1652, 1893, 1696, 1453, 1256, 1229, 864,  512,  512,  512,
+  512,  0,    512,  512,  512,  512,  864,  1229, 1256, 1453, 1696, 1893, 1652,
+  1791, 1884, 2023, 2195, 2334, 2427, 2566, 2079, 2160, 2218, 2299, 2395, 2476,
+  2534, 2615, 2661, 2742, 2800, 2881, 2977, 3058, 3116, 3197, 2553, 2576, 2622,
+  2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963, 2986, 3044, 3067, 3113, 3136,
+  3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432, 3531, 3554, 3600, 3623, 3681,
+  3704, 3750, 3773,
+};
+const int *vp10_dct_cat_lt_10_value_cost =
+    dct_cat_lt_10_value_cost +
+    (sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost)) / 2;
+
+// Array indices are identical to previously-existing CONTEXT_NODE indices
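+// A vpx_tree_index table stores entry pairs (left, right) per internal node;
+// a non-positive entry is a leaf holding the negated token value, while a
+// positive entry indexes the next pair.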
+/* clang-format off */
+const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+  -EOB_TOKEN, 2,                       // 0  = EOB
+  -ZERO_TOKEN, 4,                      // 1  = ZERO
+  -ONE_TOKEN, 6,                       // 2  = ONE
+  8, 12,                               // 3  = LOW_VAL
+  -TWO_TOKEN, 10,                      // 4  = TWO
+  -THREE_TOKEN, -FOUR_TOKEN,           // 5  = THREE
+  14, 16,                              // 6  = HIGH_LOW
+  -CATEGORY1_TOKEN, -CATEGORY2_TOKEN,  // 7  = CAT_ONE
+  18, 20,                              // 8  = CAT_THREEFOUR
+  -CATEGORY3_TOKEN, -CATEGORY4_TOKEN,  // 9  = CAT_THREE
+  -CATEGORY5_TOKEN, -CATEGORY6_TOKEN   // 10 = CAT_FIVE
+};
+/* clang-format on */
+
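+// Extra-bit trees: a token category with N extra bits is a depth-N binary
+// tree of 2 * N index entries (e.g. CATEGORY6_TOKEN carries 14 extra bits,
+// matching cat6[28]).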
+static const vpx_tree_index cat1[2] = { 0, 0 };
+static const vpx_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6[28] = { 2,  2,  4,  4,  6,  6,  8,  8,  10, 10,
+                                         12, 12, 14, 14, 16, 16, 18, 18, 20, 20,
+                                         22, 22, 24, 24, 26, 26, 0,  0 };
+
+static const int16_t zero_cost[] = { 0 };
+static const int16_t sign_cost[1] = { 512 };
+static const int16_t cat1_cost[1 << 1] = { 864, 1229 };
+static const int16_t cat2_cost[1 << 2] = { 1256, 1453, 1696, 1893 };
+static const int16_t cat3_cost[1 << 3] = { 1652, 1791, 1884, 2023,
+                                           2195, 2334, 2427, 2566 };
+static const int16_t cat4_cost[1 << 4] = { 2079, 2160, 2218, 2299, 2395, 2476,
+                                           2534, 2615, 2661, 2742, 2800, 2881,
+                                           2977, 3058, 3116, 3197 };
+static const int16_t cat5_cost[1 << 5] = {
+  2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963,
+  2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
+  3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
+};
+const int16_t vp10_cat6_low_cost[256] = {
+  3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
+  3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
+  3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
+  4065, 4112, 4124, 4135, 4147, 4169, 4181, 4192, 4204, 4266, 4278, 4289, 4301,
+  4323, 4335, 4346, 4358, 4405, 4417, 4428, 4440, 4462, 4474, 4485, 4497, 4253,
+  4265, 4276, 4288, 4310, 4322, 4333, 4345, 4392, 4404, 4415, 4427, 4449, 4461,
+  4472, 4484, 4546, 4558, 4569, 4581, 4603, 4615, 4626, 4638, 4685, 4697, 4708,
+  4720, 4742, 4754, 4765, 4777, 4848, 4860, 4871, 4883, 4905, 4917, 4928, 4940,
+  4987, 4999, 5010, 5022, 5044, 5056, 5067, 5079, 5141, 5153, 5164, 5176, 5198,
+  5210, 5221, 5233, 5280, 5292, 5303, 5315, 5337, 5349, 5360, 5372, 4988, 5000,
+  5011, 5023, 5045, 5057, 5068, 5080, 5127, 5139, 5150, 5162, 5184, 5196, 5207,
+  5219, 5281, 5293, 5304, 5316, 5338, 5350, 5361, 5373, 5420, 5432, 5443, 5455,
+  5477, 5489, 5500, 5512, 5583, 5595, 5606, 5618, 5640, 5652, 5663, 5675, 5722,
+  5734, 5745, 5757, 5779, 5791, 5802, 5814, 5876, 5888, 5899, 5911, 5933, 5945,
+  5956, 5968, 6015, 6027, 6038, 6050, 6072, 6084, 6095, 6107, 5863, 5875, 5886,
+  5898, 5920, 5932, 5943, 5955, 6002, 6014, 6025, 6037, 6059, 6071, 6082, 6094,
+  6156, 6168, 6179, 6191, 6213, 6225, 6236, 6248, 6295, 6307, 6318, 6330, 6352,
+  6364, 6375, 6387, 6458, 6470, 6481, 6493, 6515, 6527, 6538, 6550, 6597, 6609,
+  6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
+  6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
+};
+const int vp10_cat6_high_cost[64] = {
+  88,    2251,  2727,  4890,  3148,  5311,  5787,  7950,  3666,  5829,  6305,
+  8468,  6726,  8889,  9365,  11528, 3666,  5829,  6305,  8468,  6726,  8889,
+  9365,  11528, 7244,  9407,  9883,  12046, 10304, 12467, 12943, 15106, 3666,
+  5829,  6305,  8468,  6726,  8889,  9365,  11528, 7244,  9407,  9883,  12046,
+  10304, 12467, 12943, 15106, 7244,  9407,  9883,  12046, 10304, 12467, 12943,
+  15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
+};
+
+#if CONFIG_VP9_HIGHBITDEPTH
+const int vp10_cat6_high10_high_cost[256] = {
+  94,    2257,  2733,  4896,  3154,  5317,  5793,  7956,  3672,  5835,  6311,
+  8474,  6732,  8895,  9371,  11534, 3672,  5835,  6311,  8474,  6732,  8895,
+  9371,  11534, 7250,  9413,  9889,  12052, 10310, 12473, 12949, 15112, 3672,
+  5835,  6311,  8474,  6732,  8895,  9371,  11534, 7250,  9413,  9889,  12052,
+  10310, 12473, 12949, 15112, 7250,  9413,  9889,  12052, 10310, 12473, 12949,
+  15112, 10828, 12991, 13467, 15630, 13888, 16051, 16527, 18690, 4187,  6350,
+  6826,  8989,  7247,  9410,  9886,  12049, 7765,  9928,  10404, 12567, 10825,
+  12988, 13464, 15627, 7765,  9928,  10404, 12567, 10825, 12988, 13464, 15627,
+  11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 7765,  9928,  10404,
+  12567, 10825, 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566,
+  17042, 19205, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921,
+  17084, 17560, 19723, 17981, 20144, 20620, 22783, 4187,  6350,  6826,  8989,
+  7247,  9410,  9886,  12049, 7765,  9928,  10404, 12567, 10825, 12988, 13464,
+  15627, 7765,  9928,  10404, 12567, 10825, 12988, 13464, 15627, 11343, 13506,
+  13982, 16145, 14403, 16566, 17042, 19205, 7765,  9928,  10404, 12567, 10825,
+  12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205,
+  11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, 17084, 17560,
+  19723, 17981, 20144, 20620, 22783, 8280,  10443, 10919, 13082, 11340, 13503,
+  13979, 16142, 11858, 14021, 14497, 16660, 14918, 17081, 17557, 19720, 11858,
+  14021, 14497, 16660, 14918, 17081, 17557, 19720, 15436, 17599, 18075, 20238,
+  18496, 20659, 21135, 23298, 11858, 14021, 14497, 16660, 14918, 17081, 17557,
+  19720, 15436, 17599, 18075, 20238, 18496, 20659, 21135, 23298, 15436, 17599,
+  18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
+  24237, 24713, 26876
+};
+const int vp10_cat6_high12_high_cost[1024] = {
+  100,   2263,  2739,  4902,  3160,  5323,  5799,  7962,  3678,  5841,  6317,
+  8480,  6738,  8901,  9377,  11540, 3678,  5841,  6317,  8480,  6738,  8901,
+  9377,  11540, 7256,  9419,  9895,  12058, 10316, 12479, 12955, 15118, 3678,
+  5841,  6317,  8480,  6738,  8901,  9377,  11540, 7256,  9419,  9895,  12058,
+  10316, 12479, 12955, 15118, 7256,  9419,  9895,  12058, 10316, 12479, 12955,
+  15118, 10834, 12997, 13473, 15636, 13894, 16057, 16533, 18696, 4193,  6356,
+  6832,  8995,  7253,  9416,  9892,  12055, 7771,  9934,  10410, 12573, 10831,
+  12994, 13470, 15633, 7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633,
+  11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 7771,  9934,  10410,
+  12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
+  17048, 19211, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927,
+  17090, 17566, 19729, 17987, 20150, 20626, 22789, 4193,  6356,  6832,  8995,
+  7253,  9416,  9892,  12055, 7771,  9934,  10410, 12573, 10831, 12994, 13470,
+  15633, 7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512,
+  13988, 16151, 14409, 16572, 17048, 19211, 7771,  9934,  10410, 12573, 10831,
+  12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211,
+  11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566,
+  19729, 17987, 20150, 20626, 22789, 8286,  10449, 10925, 13088, 11346, 13509,
+  13985, 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864,
+  14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244,
+  18502, 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
+  19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605,
+  18081, 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080,
+  24243, 24719, 26882, 4193,  6356,  6832,  8995,  7253,  9416,  9892,  12055,
+  7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633, 7771,  9934,  10410,
+  12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572,
+  17048, 19211, 7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633, 11349,
+  13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, 13512, 13988, 16151,
+  14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, 17987, 20150, 20626,
+  22789, 8286,  10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027,
+  14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924,
+  17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
+  11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081,
+  20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665,
+  21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 8286,
+  10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666,
+  14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563,
+  19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027,
+  14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
+  20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304,
+  19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018,
+  17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180,
+  21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535,
+  21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759,
+  19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
+  27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276,
+  25752, 27915, 26173, 28336, 28812, 30975, 4193,  6356,  6832,  8995,  7253,
+  9416,  9892,  12055, 7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633,
+  7771,  9934,  10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988,
+  16151, 14409, 16572, 17048, 19211, 7771,  9934,  10410, 12573, 10831, 12994,
+  13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349,
+  13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729,
+  17987, 20150, 20626, 22789, 8286,  10449, 10925, 13088, 11346, 13509, 13985,
+  16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027,
+  14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502,
+  20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
+  15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081,
+  20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243,
+  24719, 26882, 8286,  10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864,
+  14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666,
+  14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141,
+  23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605,
+  18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502,
+  20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882,
+  12379, 14542, 15018, 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596,
+  20759, 19017, 21180, 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180,
+  21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957,
+  18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
+  22595, 24758, 25234, 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234,
+  27397, 23113, 25276, 25752, 27915, 26173, 28336, 28812, 30975, 8286,  10449,
+  10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, 14924,
+  17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726,
+  15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, 14503,
+  16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665,
+  21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 19020,
+  21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, 17181,
+  15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
+  23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698,
+  22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017,
+  21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397,
+  19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752,
+  27915, 26173, 28336, 28812, 30975, 12379, 14542, 15018, 17181, 15439, 17602,
+  18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 15957,
+  18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337,
+  22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, 21180, 21656,
+  23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 19535, 21698,
+  22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, 27915, 26173,
+  28336, 28812, 30975, 16472, 18635, 19111, 21274, 19532, 21695, 22171, 24334,
+  20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 20050, 22213, 22689,
+  24852, 23110, 25273, 25749, 27912, 23628, 25791, 26267, 28430, 26688, 28851,
+  29327, 31490, 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 23628,
+  25791, 26267, 28430, 26688, 28851, 29327, 31490, 23628, 25791, 26267, 28430,
+  26688, 28851, 29327, 31490, 27206, 29369, 29845, 32008, 30266, 32429, 32905,
+  35068
+};
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static const vpx_tree_index cat1_high10[2] = { 0, 0 };
+static const vpx_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high10[32] = { 2,  2,  4,  4,  6,  6,  8,  8,
+                                                10, 10, 12, 12, 14, 14, 16, 16,
+                                                18, 18, 20, 20, 22, 22, 24, 24,
+                                                26, 26, 28, 28, 30, 30, 0,  0 };
+static const vpx_tree_index cat1_high12[2] = { 0, 0 };
+static const vpx_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high12[36] = {
+  2,  2,  4,  4,  6,  6,  8,  8,  10, 10, 12, 12, 14, 14, 16, 16, 18, 18,
+  20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, 30, 32, 32, 34, 34, 0,  0
+};
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                             // ZERO_TOKEN
+  { 0, 0, 0, 1, sign_cost },                             // ONE_TOKEN
+  { 0, 0, 0, 2, sign_cost },                             // TWO_TOKEN
+  { 0, 0, 0, 3, sign_cost },                             // THREE_TOKEN
+  { 0, 0, 0, 4, sign_cost },                             // FOUR_TOKEN
+  { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },  // CATEGORY1_TOKEN
+  { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },  // CATEGORY2_TOKEN
+  { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },  // CATEGORY3_TOKEN
+  { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },  // CATEGORY4_TOKEN
+  { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },  // CATEGORY5_TOKEN
+  { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 },         // CATEGORY6_TOKEN
+  { 0, 0, 0, 0, zero_cost }                              // EOB_TOKEN
+};
+
+#if CONFIG_VP9_HIGHBITDEPTH
+const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                                           // ZERO
+  { 0, 0, 0, 1, sign_cost },                                           // ONE
+  { 0, 0, 0, 2, sign_cost },                                           // TWO
+  { 0, 0, 0, 3, sign_cost },                                           // THREE
+  { 0, 0, 0, 4, sign_cost },                                           // FOUR
+  { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                            // EOB
+};
+const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
+  { 0, 0, 0, 0, zero_cost },                                           // ZERO
+  { 0, 0, 0, 1, sign_cost },                                           // ONE
+  { 0, 0, 0, 2, sign_cost },                                           // TWO
+  { 0, 0, 0, 3, sign_cost },                                           // THREE
+  { 0, 0, 0, 4, sign_cost },                                           // FOUR
+  { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
+  { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
+  { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
+  { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
+  { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
+  { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },         // CAT6
+  { 0, 0, 0, 0, zero_cost }                                            // EOB
+};
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if !CONFIG_ANS
+const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
+  { 2, 2 },  { 6, 3 },   { 28, 5 },  { 58, 6 },  { 59, 6 },  { 60, 6 },
+  { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
+};
+#endif  // !CONFIG_ANS
+
+struct tokenize_b_args {
+  VP10_COMP *cpi;
+  ThreadData *td;
+  TOKENEXTRA **tp;
+};
+
+static void set_entropy_context_b(int plane, int block, int blk_row,
+                                  int blk_col, BLOCK_SIZE plane_bsize,
+                                  TX_SIZE tx_size, void *arg) {
+  struct tokenize_b_args *const args = arg;
+  ThreadData *const td = args->td;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  struct macroblock_plane *p = &x->plane[plane];
+  struct macroblockd_plane *pd = &xd->plane[plane];
+  vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+                    blk_row);
+}
+
+static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
+#if CONFIG_ANS
+                             const rans_dec_lut *token_cdf,
+#endif  // CONFIG_ANS
+                             int32_t extra, uint8_t token,
+                             uint8_t skip_eob_node, unsigned int *counts) {
+  (*t)->token = token;
+  (*t)->extra = extra;
+  (*t)->context_tree = context_tree;
+#if CONFIG_ANS
+  (*t)->token_cdf = token_cdf;
+#endif  // CONFIG_ANS
+  (*t)->skip_eob_node = skip_eob_node;
+  (*t)++;
+  ++counts[token];
+}
+
+static INLINE void add_token_no_extra(TOKENEXTRA **t,
+                                      const vpx_prob *context_tree,
+                                      uint8_t token, uint8_t skip_eob_node,
+                                      unsigned int *counts) {
+  (*t)->token = token;
+  (*t)->context_tree = context_tree;
+  (*t)->skip_eob_node = skip_eob_node;
+  (*t)++;
+  ++counts[token];
+}
+
+static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
+                             TX_SIZE tx_size) {
+  const int eob_max = num_4x4_blocks_txsize_lookup[tx_size] << 4;
+  return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
+}
+
+void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+                              int plane, TOKENEXTRA **t) {
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  uint8_t *color_map = xd->plane[plane != 0].color_index_map;
+  PALETTE_MODE_INFO *pmi = &mbmi->palette_mode_info;
+  int n = pmi->palette_size[plane != 0];
+  int i, j, k;
+  int color_new_idx = -1, color_ctx, color_order[PALETTE_MAX_SIZE];
+  const int rows = (4 * num_4x4_blocks_high_lookup[bsize]) >>
+                   (xd->plane[plane != 0].subsampling_y);
+  const int cols = (4 * num_4x4_blocks_wide_lookup[bsize]) >>
+                   (xd->plane[plane != 0].subsampling_x);
+  const vpx_prob (*const probs)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+      plane == 0 ? vp10_default_palette_y_color_prob
+                 : vp10_default_palette_uv_color_prob;
+
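+  // The top-left color index is skipped here (the scan starts at (0, 1));
+  // it is assumed to be coded separately.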
+  for (i = 0; i < rows; ++i) {
+    for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
+      color_ctx =
+          vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
+      for (k = 0; k < n; ++k)
+        if (color_map[i * cols + j] == color_order[k]) {
+          color_new_idx = k;
+          break;
+        }
+      assert(color_new_idx >= 0 && color_new_idx < n);
+      (*t)->token = color_new_idx;
+      (*t)->context_tree = probs[n - 2][color_ctx];
+      (*t)->skip_eob_node = 0;
+      ++(*t);
+    }
+  }
+}
+
+static void tokenize_b(int plane, int block, int blk_row, int blk_col,
+                       BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+  struct tokenize_b_args *const args = arg;
+  VP10_COMP *cpi = args->cpi;
+  ThreadData *const td = args->td;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  TOKENEXTRA **tp = args->tp;
+  uint8_t token_cache[MAX_TX_SQUARE];
+  struct macroblock_plane *p = &x->plane[plane];
+  struct macroblockd_plane *pd = &xd->plane[plane];
+  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  int pt; /* near block/prev token context index */
+  int c;
+  TOKENEXTRA *t = *tp; /* store tokens starting here */
+  int eob = p->eobs[block];
+  const PLANE_TYPE type = pd->plane_type;
+  const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
+#if CONFIG_SUPERTX
+  const int segment_id = VPXMIN(mbmi->segment_id, mbmi->segment_id_supertx);
+#else
+  const int segment_id = mbmi->segment_id;
+#endif  // CONFIG_SUPERTX
+  const int16_t *scan, *nb;
+  const TX_TYPE tx_type = get_tx_type(type, xd, block, tx_size);
+  const scan_order *const so = get_scan(tx_size, tx_type, is_inter_block(mbmi));
+  const int ref = is_inter_block(mbmi);
+  unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
+      td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
+#if CONFIG_ENTROPY
+  vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+      cpi->subframe_stats.coef_probs_buf[cpi->common.coef_probs_update_idx]
+                                        [txsize_sqr_map[tx_size]][type][ref];
+#else
+  vpx_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+      cpi->common.fc->coef_probs[txsize_sqr_map[tx_size]][type][ref];
+#endif  // CONFIG_ENTROPY
+#if CONFIG_ANS
+  rans_dec_lut (*const coef_cdfs)[COEFF_CONTEXTS] =
+      cpi->common.fc->coef_cdfs[txsize_sqr_map[tx_size]][type][ref];
+#endif  // CONFIG_ANS
+  unsigned int (*const eob_branch)[COEFF_CONTEXTS] =
+      td->counts->eob_branch[txsize_sqr_map[tx_size]][type][ref];
+  const uint8_t *const band = get_band_translate(tx_size);
+  const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
+  int skip_eob = 0;
+  int16_t token;
+  EXTRABIT extra;
+  pt = get_entropy_context(tx_size, pd->above_context + blk_col,
+                           pd->left_context + blk_row);
+  scan = so->scan;
+  nb = so->neighbors;
+  c = 0;
+
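+  // Tokenize the coefficients in scan order. After a ZERO_TOKEN the next
+  // token cannot be an EOB (trailing zeros are never coded), so the EOB
+  // tree node is skipped and eob_branch is not incremented for it.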
+  while (c < eob) {
+    const int v = qcoeff[scan[c]];
+    eob_branch[band[c]][pt] += !skip_eob;
+
+    vp10_get_token_extra(v, &token, &extra);
+
+    add_token(&t, coef_probs[band[c]][pt],
+#if CONFIG_ANS
+              (const rans_dec_lut *)&coef_cdfs[band[c]][pt],
+#endif  // CONFIG_ANS
+              extra, (uint8_t)token, (uint8_t)skip_eob, counts[band[c]][pt]);
+
+    token_cache[scan[c]] = vp10_pt_energy_class[token];
+    ++c;
+    pt = get_coef_context(nb, token_cache, c);
+    skip_eob = (token == ZERO_TOKEN);
+  }
+  if (c < seg_eob) {
+    add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0,
+                       counts[band[c]][pt]);
+    ++eob_branch[band[c]][pt];
+  }
+
+  *tp = t;
+
+  vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
+}
+
+struct is_skippable_args {
+  uint16_t *eobs;
+  int *skippable;
+};
+static void is_skippable(int plane, int block, int blk_row, int blk_col,
+                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) {
+  struct is_skippable_args *args = argv;
+  (void)plane;
+  (void)plane_bsize;
+  (void)tx_size;
+  (void)blk_row;
+  (void)blk_col;
+  args->skippable[0] &= (!args->eobs[block]);
+}
+
+// TODO(yaowu): rewrite and optimize this function to remove the usage of
+//              vp10_foreach_transformed_block_in_plane() and simplify
+//              is_skippable().
+int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+  int result = 1;
+  struct is_skippable_args args = { x->plane[plane].eobs, &result };
+  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+                                          &args);
+  return result;
+}
+
+static void has_high_freq_coeff(int plane, int block, int blk_row, int blk_col,
+                                BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+                                void *argv) {
+  struct is_skippable_args *args = argv;
+  int eobs = (tx_size == TX_4X4) ? 3 : 10;
+  (void)plane;
+  (void)plane_bsize;
+  (void)blk_row;
+  (void)blk_col;
+
+  *(args->skippable) |= (args->eobs[block] > eobs);
+}
+
+int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+  int result = 0;
+  struct is_skippable_args args = { x->plane[plane].eobs, &result };
+  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
+                                          has_high_freq_coeff, &args);
+  return result;
+}
+
+#if CONFIG_VAR_TX
+void tokenize_tx(ThreadData *td, TOKENEXTRA **t, int dry_run, TX_SIZE tx_size,
+                 BLOCK_SIZE plane_bsize, int blk_row, int blk_col, int block,
+                 int plane, void *arg) {
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
+  const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
+  const int tx_row = blk_row >> (1 - pd->subsampling_y);
+  const int tx_col = blk_col >> (1 - pd->subsampling_x);
+  TX_SIZE plane_tx_size;
+
+  int max_blocks_high = num_4x4_blocks_high_lookup[plane_bsize];
+  int max_blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize];
+
+  assert(tx_size < TX_SIZES);
+
+  if (xd->mb_to_bottom_edge < 0)
+    max_blocks_high += xd->mb_to_bottom_edge >> (5 + pd->subsampling_y);
+  if (xd->mb_to_right_edge < 0)
+    max_blocks_wide += xd->mb_to_right_edge >> (5 + pd->subsampling_x);
+
+  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+
+  plane_tx_size = plane ? get_uv_tx_size_impl(
+                              mbmi->inter_tx_size[tx_row][tx_col], bsize, 0, 0)
+                        : mbmi->inter_tx_size[tx_row][tx_col];
+
+  if (tx_size == plane_tx_size) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
+    if (!dry_run)
+      tokenize_b(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
+    else
+      set_entropy_context_b(plane, block, blk_row, blk_col, plane_bsize,
+                            tx_size, arg);
+  } else {
+    int bsl = b_width_log2_lookup[bsize];
+    int i;
+
+    assert(bsl > 0);
+    --bsl;
+
+    for (i = 0; i < 4; ++i) {
+      const int offsetr = blk_row + ((i >> 1) << bsl);
+      const int offsetc = blk_col + ((i & 0x01) << bsl);
+      int step = num_4x4_blocks_txsize_lookup[tx_size - 1];
+
+      if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
+
+      tokenize_tx(td, t, dry_run, tx_size - 1, plane_bsize, offsetr, offsetc,
+                  block + i * step, plane, arg);
+    }
+  }
+}
+
+void vp10_tokenize_sb_inter(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                            int dry_run, int mi_row, int mi_col,
+                            BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  TOKENEXTRA *t_backup = *t;
+  const int ctx = vp10_get_skip_context(xd);
+  const int skip_inc =
+      !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
+  struct tokenize_b_args arg = { cpi, td, t };
+  int plane;
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
+
+  if (mbmi->skip) {
+    if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
+    reset_skip_context(xd, bsize);
+    if (dry_run) *t = t_backup;
+    return;
+  }
+
+  if (!dry_run)
+    td->counts->skip[ctx][0] += skip_inc;
+  else
+    *t = t_backup;
+
+  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+    const struct macroblockd_plane *const pd = &xd->plane[plane];
+    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+    const int mi_width = num_4x4_blocks_wide_lookup[plane_bsize];
+    const int mi_height = num_4x4_blocks_high_lookup[plane_bsize];
+    const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
+    const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
+    int bh = num_4x4_blocks_wide_lookup[txb_size];
+    int idx, idy;
+    int block = 0;
+    int step = num_4x4_blocks_txsize_lookup[max_tx_size];
+    for (idy = 0; idy < mi_height; idy += bh) {
+      for (idx = 0; idx < mi_width; idx += bh) {
+        tokenize_tx(td, t, dry_run, max_tx_size, plane_bsize, idy, idx, block,
+                    plane, &arg);
+        block += step;
+      }
+    }
+
+    if (!dry_run) {
+      (*t)->token = EOSB_TOKEN;
+      (*t)++;
+    }
+  }
+}
+#endif  // CONFIG_VAR_TX
+
+void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                      int dry_run, BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCK *const x = &td->mb;
+  MACROBLOCKD *const xd = &x->e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  const int ctx = vp10_get_skip_context(xd);
+  const int skip_inc =
+      !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
+  struct tokenize_b_args arg = { cpi, td, t };
+  if (mbmi->skip) {
+    if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
+    reset_skip_context(xd, bsize);
+    return;
+  }
+
+  if (!dry_run) {
+    int plane;
+
+    td->counts->skip[ctx][0] += skip_inc;
+
+    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+                                              &arg);
+      (*t)->token = EOSB_TOKEN;
+      (*t)++;
+    }
+  } else {
+    vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+  }
+}
+
+#if CONFIG_SUPERTX
+void vp10_tokenize_sb_supertx(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+                              int dry_run, BLOCK_SIZE bsize) {
+  VP10_COMMON *const cm = &cpi->common;
+  MACROBLOCKD *const xd = &td->mb.e_mbd;
+  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  TOKENEXTRA *t_backup = *t;
+  const int ctx = vp10_get_skip_context(xd);
+  const int skip_inc =
+      !segfeature_active(&cm->seg, mbmi->segment_id_supertx, SEG_LVL_SKIP);
+  struct tokenize_b_args arg = { cpi, td, t };
+  if (mbmi->skip) {
+    if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
+    reset_skip_context(xd, bsize);
+    if (dry_run) *t = t_backup;
+    return;
+  }
+
+  if (!dry_run) {
+    int plane;
+    td->counts->skip[ctx][0] += skip_inc;
+
+    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+                                              &arg);
+      (*t)->token = EOSB_TOKEN;
+      (*t)++;
+    }
+  } else {
+    vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+    *t = t_backup;
+  }
+}
+#endif  // CONFIG_SUPERTX
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
new file mode 100644
index 0000000..7ae8676
--- /dev/null
+++ b/av1/encoder/tokenize.h
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_TOKENIZE_H_
+#define VP10_ENCODER_TOKENIZE_H_
+
+#include "av1/common/entropy.h"
+
+#include "av1/encoder/block.h"
+#include "av1/encoder/treewriter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EOSB_TOKEN 127  // Not signalled, encoder only
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef int32_t EXTRABIT;
+#else
+typedef int16_t EXTRABIT;
+#endif
+
+typedef struct {
+  int16_t token;
+  EXTRABIT extra;
+} TOKENVALUE;
+
+typedef struct {
+  const vpx_prob *context_tree;
+#if CONFIG_ANS
+  const rans_dec_lut *token_cdf;
+#endif  // CONFIG_ANS
+  EXTRABIT extra;
+  uint8_t token;
+  uint8_t skip_eob_node;
+} TOKENEXTRA;
+
+extern const vpx_tree_index vp10_coef_tree[];
+extern const vpx_tree_index vp10_coef_con_tree[];
+#if !CONFIG_ANS
+extern const struct vp10_token vp10_coef_encodings[];
+#endif  // !CONFIG_ANS
+
+int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+
+struct VP10_COMP;
+struct ThreadData;
+
+#if CONFIG_VAR_TX
+void vp10_tokenize_sb_inter(struct VP10_COMP *cpi, struct ThreadData *td,
+                            TOKENEXTRA **t, int dry_run, int mi_row, int mi_col,
+                            BLOCK_SIZE bsize);
+#endif
+
+void vp10_tokenize_palette_sb(struct ThreadData *const td, BLOCK_SIZE bsize,
+                              int plane, TOKENEXTRA **t);
+void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
+                      TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+#if CONFIG_SUPERTX
+void vp10_tokenize_sb_supertx(struct VP10_COMP *cpi, struct ThreadData *td,
+                              TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+#endif
+
+extern const int16_t *vp10_dct_value_cost_ptr;
+/* TODO: The Token field should be broken out into a separate char array to
+ *  improve cache locality, since it's needed for costing when the rest of the
+ *  fields are not.
+ */
+extern const TOKENVALUE *vp10_dct_value_tokens_ptr;
+extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens;
+extern const int *vp10_dct_cat_lt_10_value_cost;
+extern const int16_t vp10_cat6_low_cost[256];
+extern const int vp10_cat6_high_cost[64];
+extern const int vp10_cat6_high10_high_cost[256];
+extern const int vp10_cat6_high12_high_cost[1024];
+static INLINE int vp10_get_cost(int16_t token, EXTRABIT extrabits,
+                                const int *cat6_high_table) {
+  if (token != CATEGORY6_TOKEN)
+    return vp10_extra_bits[token].cost[extrabits >> 1];
+  return vp10_cat6_low_cost[(extrabits >> 1) & 0xff] +
+         cat6_high_table[extrabits >> 9];
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+  return bit_depth == 8 ? vp10_cat6_high_cost
+                        : (bit_depth == 10 ? vp10_cat6_high10_high_cost
+                                           : vp10_cat6_high12_high_cost);
+}
+#else
+static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+  (void)bit_depth;
+  return vp10_cat6_high_cost;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
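+// An EXTRABIT value stores the magnitude offset from the category's minimum
+// value shifted left by one, with the sign in the low bit (0 = positive,
+// 1 = negative); the >> 1 in vp10_get_cost() above strips this sign bit.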
+static INLINE void vp10_get_token_extra(int v, int16_t *token,
+                                        EXTRABIT *extra) {
+  if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
+    *token = CATEGORY6_TOKEN;
+    if (v >= CAT6_MIN_VAL)
+      *extra = 2 * v - 2 * CAT6_MIN_VAL;
+    else
+      *extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
+    return;
+  }
+  *token = vp10_dct_cat_lt_10_value_tokens[v].token;
+  *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
+}
+static INLINE int16_t vp10_get_token(int v) {
+  if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
+  return vp10_dct_cat_lt_10_value_tokens[v].token;
+}
+
+static INLINE int vp10_get_token_cost(int v, int16_t *token,
+                                      const int *cat6_high_table) {
+  if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
+    EXTRABIT extrabits;
+    *token = CATEGORY6_TOKEN;
+    extrabits = abs(v) - CAT6_MIN_VAL;
+    return vp10_cat6_low_cost[extrabits & 0xff] +
+           cat6_high_table[extrabits >> 8];
+  }
+  *token = vp10_dct_cat_lt_10_value_tokens[v].token;
+  return vp10_dct_cat_lt_10_value_cost[v];
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_TOKENIZE_H_
diff --git a/av1/encoder/treewriter.c b/av1/encoder/treewriter.c
new file mode 100644
index 0000000..d3fcd45
--- /dev/null
+++ b/av1/encoder/treewriter.c
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "av1/encoder/treewriter.h"
+
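+// Depth-first walk over the tree: v accumulates the bit path and l its
+// length; a non-positive index is a leaf, and -index is the token whose
+// value/length pair is being filled in.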
+static void tree2tok(struct vp10_token *tokens, const vpx_tree_index *tree,
+                     int i, int v, int l) {
+  v += v;
+  ++l;
+
+  do {
+    const vpx_tree_index j = tree[i++];
+    if (j <= 0) {
+      tokens[-j].value = v;
+      tokens[-j].len = l;
+    } else {
+      tree2tok(tokens, tree, j, v, l);
+    }
+  } while (++v & 1);
+}
+
+void vp10_tokens_from_tree(struct vp10_token *tokens,
+                           const vpx_tree_index *tree) {
+  tree2tok(tokens, tree, 0, 0, 0);
+}
+
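+// Recursively sums the per-token event counts over the tree, recording for
+// each internal node how often its left and right branches were taken.
+// Returns the total count of the subtree rooted at index i.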
+static unsigned int convert_distribution(unsigned int i, vpx_tree tree,
+                                         unsigned int branch_ct[][2],
+                                         const unsigned int num_events[]) {
+  unsigned int left, right;
+
+  if (tree[i] <= 0)
+    left = num_events[-tree[i]];
+  else
+    left = convert_distribution(tree[i], tree, branch_ct, num_events);
+
+  if (tree[i + 1] <= 0)
+    right = num_events[-tree[i + 1]];
+  else
+    right = convert_distribution(tree[i + 1], tree, branch_ct, num_events);
+
+  branch_ct[i >> 1][0] = left;
+  branch_ct[i >> 1][1] = right;
+  return left + right;
+}
+
+void vp10_tree_probs_from_distribution(vpx_tree tree,
+                                       unsigned int branch_ct[/* n-1 */][2],
+                                       const unsigned int num_events[/* n */]) {
+  convert_distribution(0, tree, branch_ct, num_events);
+}
diff --git a/av1/encoder/treewriter.h b/av1/encoder/treewriter.h
new file mode 100644
index 0000000..43c615f
--- /dev/null
+++ b/av1/encoder/treewriter.h
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_TREEWRITER_H_
+#define VP10_ENCODER_TREEWRITER_H_
+
+#ifdef VP10_FORCE_VPXBOOL_TREEWRITER
+#include "aom_dsp/bitwriter.h"
+#define tree_writer vpx_writer
+#define tree_bit_write vpx_write
+#else
+#include "av1/encoder/bitwriter.h"
+#define tree_writer vp10_writer
+#define tree_bit_write vp10_write
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void vp10_tree_probs_from_distribution(vpx_tree tree,
+                                       unsigned int branch_ct[/* n - 1 */][2],
+                                       const unsigned int num_events[/* n */]);
+
+struct vp10_token {
+  int value;
+  int len;
+};
+
+void vp10_tokens_from_tree(struct vp10_token *, const vpx_tree_index *);
+
+static INLINE void vp10_write_tree(tree_writer *w, const vpx_tree_index *tree,
+                                   const vpx_prob *probs, int bits, int len,
+                                   vpx_tree_index i) {
+  do {
+    const int bit = (bits >> --len) & 1;
+    tree_bit_write(w, bit, probs[i >> 1]);
+    i = tree[i + bit];
+  } while (len);
+}
+
+static INLINE void vp10_write_token(tree_writer *w, const vpx_tree_index *tree,
+                                    const vpx_prob *probs,
+                                    const struct vp10_token *token) {
+  vp10_write_tree(w, tree, probs, token->value, token->len, 0);
+}
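+/* Usage sketch (illustrative only): with the non-ANS encodings table from
+ * tokenize.h, a coefficient token t could be written as
+ *   vp10_write_token(w, vp10_coef_tree, probs, &vp10_coef_encodings[t]);
+ * which emits token->len bits of token->value along the tree. */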
+
+#undef tree_writer
+#undef tree_bit_write
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP10_ENCODER_TREEWRITER_H_
diff --git a/av1/encoder/variance_tree.c b/av1/encoder/variance_tree.c
new file mode 100644
index 0000000..219d39a
--- /dev/null
+++ b/av1/encoder/variance_tree.c
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "av1/encoder/variance_tree.h"
+#include "av1/encoder/encoder.h"
+
+void vp10_setup_var_tree(struct VP10Common *cm, ThreadData *td) {
+  int i, j;
+#if CONFIG_EXT_PARTITION
+  const int leaf_nodes = 1024;
+  const int tree_nodes = 1024 + 256 + 64 + 16 + 4 + 1;
+#else
+  const int leaf_nodes = 256;
+  const int tree_nodes = 256 + 64 + 16 + 4 + 1;
+#endif  // CONFIG_EXT_PARTITION
+  int index = 0;
+  VAR_TREE *this_var;
+  int nodes;
+
+  vpx_free(td->var_tree);
+  CHECK_MEM_ERROR(cm, td->var_tree,
+                  vpx_calloc(tree_nodes, sizeof(*td->var_tree)));
+
+  this_var = &td->var_tree[0];
+
+  // Set up all the leaf nodes in the tree.
+  for (index = 0; index < leaf_nodes; ++index) {
+    VAR_TREE *const leaf = &td->var_tree[index];
+    leaf->split[0] = NULL;
+  }
+
+  // Each internal node has 4 children; fill in the child pointers
+  // from the leaves up to the root.
+  for (nodes = leaf_nodes >> 2; nodes > 0; nodes >>= 2) {
+    for (i = 0; i < nodes; ++i, ++index) {
+      VAR_TREE *const node = &td->var_tree[index];
+      for (j = 0; j < 4; j++) node->split[j] = this_var++;
+    }
+  }
+
+  // Set up the root node for the largest superblock size
+  i = MAX_MIB_SIZE_LOG2 - MIN_MIB_SIZE_LOG2;
+  td->var_root[i] = &td->var_tree[tree_nodes - 1];
+  // Set up the root nodes for the rest of the possible superblock sizes
+  while (--i >= 0) {
+    td->var_root[i] = td->var_root[i + 1]->split[0];
+  }
+}
+
+void vp10_free_var_tree(ThreadData *td) {
+  vpx_free(td->var_tree);
+  td->var_tree = NULL;
+}
diff --git a/av1/encoder/variance_tree.h b/av1/encoder/variance_tree.h
new file mode 100644
index 0000000..08c40d3
--- /dev/null
+++ b/av1/encoder/variance_tree.h
@@ -0,0 +1,95 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP10_ENCODER_VARIANCE_TREE_H_
+#define VP10_ENCODER_VARIANCE_TREE_H_
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+
+#include "aom/vpx_integer.h"
+
+#include "av1/common/enums.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP10Common;
+struct ThreadData;
+
+typedef struct {
+  int64_t sum_square_error;
+  int64_t sum_error;
+  int log2_count;
+  int variance;
+} var;
+
+typedef struct {
+  var none;
+  var horz[2];
+  var vert[2];
+} partition_variance;
+
+typedef struct VAR_TREE {
+  int force_split;
+  partition_variance variances;
+  struct VAR_TREE *split[4];
+  BLOCK_SIZE bsize;
+  const uint8_t *src;
+  const uint8_t *ref;
+  int src_stride;
+  int ref_stride;
+  int width;
+  int height;
+#if CONFIG_VP9_HIGHBITDEPTH
+  int highbd;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+} VAR_TREE;
+
+void vp10_setup_var_tree(struct VP10Common *cm, struct ThreadData *td);
+void vp10_free_var_tree(struct ThreadData *td);
+
+// Set variance values given sum square error, sum error, count.
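+// With count = 1 << log2_count samples this computes
+//   256 * (s2 - s * s / count) / count,
+// i.e. the population variance scaled by 256 to preserve precision in an int.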
+static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
+  v->sum_square_error = s2;
+  v->sum_error = s;
+  v->log2_count = c;
+  v->variance =
+      (int)(256 * (v->sum_square_error -
+                   ((v->sum_error * v->sum_error) >> v->log2_count)) >>
+            v->log2_count);
+}
+
+static INLINE void sum_2_variances(const var *a, const var *b, var *r) {
+  assert(a->log2_count == b->log2_count);
+  fill_variance(a->sum_square_error + b->sum_square_error,
+                a->sum_error + b->sum_error, a->log2_count + 1, r);
+}
+
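+// Combine the four child variances of a node. Assuming the children are in
+// raster order (0 = top-left, 1 = top-right, 2 = bottom-left,
+// 3 = bottom-right), pairwise sums give the horizontal halves, the vertical
+// halves, and finally the whole block.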
+static INLINE void fill_variance_node(VAR_TREE *vt) {
+  sum_2_variances(&vt->split[0]->variances.none, &vt->split[1]->variances.none,
+                  &vt->variances.horz[0]);
+  sum_2_variances(&vt->split[2]->variances.none, &vt->split[3]->variances.none,
+                  &vt->variances.horz[1]);
+  sum_2_variances(&vt->split[0]->variances.none, &vt->split[2]->variances.none,
+                  &vt->variances.vert[0]);
+  sum_2_variances(&vt->split[1]->variances.none, &vt->split[3]->variances.none,
+                  &vt->variances.vert[1]);
+  sum_2_variances(&vt->variances.vert[0], &vt->variances.vert[1],
+                  &vt->variances.none);
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif /* VP10_ENCODER_VARIANCE_TREE_H_ */
diff --git a/av1/encoder/wedge_utils.c b/av1/encoder/wedge_utils.c
new file mode 100644
index 0000000..548bc48
--- /dev/null
+++ b/av1/encoder/wedge_utils.c
@@ -0,0 +1,128 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "aom/vpx_integer.h"
+
+#include "aom_ports/mem.h"
+
+#include "aom_dsp/vpx_dsp_common.h"
+
+#include "av1/common/reconinter.h"
+
+#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
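+// With WEDGE_WEIGHT_BITS == 6 (implied by the "(= 10)" note below), mask
+// values range over [0, 64]; a mask equal to MAX_MASK_VALUE selects p0
+// entirely in the blend described below.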
+
+/**
+ * Computes SSE of a compound predictor constructed from 2 fundamental
+ * predictors p0 and p1 using blending with mask.
+ *
+ * r1:  Residuals of p1.
+ *      (source - p1)
+ * d:   Difference of p1 and p0.
+ *      (p1 - p0)
+ * m:   The blending mask
+ * N:   Number of pixels
+ *
+ * 'r1', 'd', and 'm' are contiguous.
+ *
+ * Computes:
+ *  Sum((MAX_MASK_VALUE*r1 + mask*d)**2), which is equivalent to:
+ *  Sum((mask*r0 + (MAX_MASK_VALUE-mask)*r1)**2),
+ *    where r0 is (source - p0) and r1 is (source - p1), which in turn is
+ *    equivalent to:
+ *  Sum((source*MAX_MASK_VALUE - (mask*p0 + (MAX_MASK_VALUE-mask)*p1))**2),
+ *    which is the SSE of the residuals of the compound predictor scaled up by
+ *    MAX_MASK_VALUE**2.
+ *
+ * Note that we clamp the partial term in the loop to 16 bits signed. This is
+ * to facilitate an equivalent SIMD implementation. It should have no effect
+ * if residuals fit in 16 - WEDGE_WEIGHT_BITS (= 10) signed bits, which always
+ * holds for 8-bit input; on real input it should hold practically always, as
+ * residuals are expected to be small.
+ */
+uint64_t vp10_wedge_sse_from_residuals_c(const int16_t *r1, const int16_t *d,
+                                         const uint8_t *m, int N) {
+  uint64_t csse = 0;
+  int i;
+  assert(N % 64 == 0);
+  for (i = 0; i < N; i++) {
+    int32_t t = MAX_MASK_VALUE * r1[i] + m[i] * d[i];
+    t = clamp(t, INT16_MIN, INT16_MAX);
+    csse += t * t;
+  }
+  return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+/**
+ * Choose the mask sign for a compound predictor.
+ *
+ * ds:    Difference of the squares of the residuals.
+ *        r0**2 - r1**2
+ * m:     The blending mask
+ * N:     Number of pixels
+ * limit: Pre-computed threshold value.
+ *        MAX_MASK_VALUE/2 * (sum(r0**2) - sum(r1**2))
+ *
+ * 'ds' and 'm' are contiguous.
+ *
+ * Returns true if the negated mask has lower SSE compared to the positive
+ * mask. Computation is based on:
+ *  Sum((mask*r0 + (MAX_MASK_VALUE-mask)*r1)**2)
+ *                                     >
+ *                                Sum(((MAX_MASK_VALUE-mask)*r0 + mask*r1)**2)
+ *
+ *  which can be simplified to:
+ *
+ *  Sum(mask*(r0**2 - r1**2)) > MAX_MASK_VALUE/2 * (sum(r0**2) - sum(r1**2))
+ *
+ *  The right hand side does not depend on the mask, and needs to be passed as
+ *  the 'limit' parameter.
+ *
+ *  After pre-computing (r0**2 - r1**2), which is passed in as 'ds', the left
+ *  hand side is simply a scalar product between an int16_t and uint8_t vector.
+ *
+ *  Note that for efficiency, ds is stored in 16 bits. Since real input
+ *  residuals are small, this should not cause a noticeable issue.
+ */
+int vp10_wedge_sign_from_residuals_c(const int16_t *ds, const uint8_t *m, int N,
+                                     int64_t limit) {
+  int64_t acc = 0;
+
+  assert(N % 64 == 0);
+
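+  // Each int16_t * uint8_t product fits in 32 bits, but the running sum over
+  // N pixels can overflow 32 bits, hence the int64_t accumulator.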
+  do {
+    acc += *ds++ * *m++;
+  } while (--N);
+
+  return acc > limit;
+}
+
+/**
+ * Compute the element-wise difference of the squares of 2 arrays.
+ *
+ * d: Difference of the squares of the inputs: a**2 - b**2
+ * a: First input array
+ * b: Second input array
+ * N: Number of elements
+ *
+ * 'd', 'a', and 'b' are contiguous.
+ *
+ * The result is saturated to signed 16 bits.
+ */
+void vp10_wedge_compute_delta_squares_c(int16_t *d, const int16_t *a,
+                                        const int16_t *b, int N) {
+  int i;
+
+  assert(N % 64 == 0);
+
+  for (i = 0; i < N; i++)
+    d[i] = clamp(a[i] * a[i] - b[i] * b[i], INT16_MIN, INT16_MAX);
+}
diff --git a/av1/encoder/x86/dct_intrin_sse2.c b/av1/encoder/x86/dct_intrin_sse2.c
new file mode 100644
index 0000000..6fe3ada
--- /dev/null
+++ b/av1/encoder/x86/dct_intrin_sse2.c
@@ -0,0 +1,2583 @@
+/*
+ *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>  // SSE2
+
+#include "./vp10_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/fwd_txfm_sse2.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+#include "aom_ports/mem.h"
+
+static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1);
+  const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0);
+  __m128i mask;
+
+  if (!flipud) {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+  } else {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 4);
+  in[1] = _mm_slli_epi16(in[1], 4);
+  in[2] = _mm_slli_epi16(in[2], 4);
+  in[3] = _mm_slli_epi16(in[3], 4);
+
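+  // The cmpeq/add pair below adds one to the first element of row 0
+  // (input[0]) when it is nonzero: a zero element matches k__nonzero_bias_a,
+  // and the resulting -1 mask cancels the +1 from k__nonzero_bias_b. This
+  // mirrors the rounding tweak in the C reference 4x4 forward transform.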
+  mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a);
+  in[0] = _mm_add_epi16(in[0], mask);
+  in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b);
+}
+
+static INLINE void write_buffer_4x4(tran_low_t *output, __m128i *res) {
+  const __m128i kOne = _mm_set1_epi16(1);
+  __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]);
+  __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]);
+  __m128i out01 = _mm_add_epi16(in01, kOne);
+  __m128i out23 = _mm_add_epi16(in23, kOne);
+  out01 = _mm_srai_epi16(out01, 2);
+  out23 = _mm_srai_epi16(out23, 2);
+  store_output(&out01, (output + 0 * 8));
+  store_output(&out23, (output + 1 * 8));
+}
+
+static INLINE void transpose_4x4(__m128i *res) {
+  // Combine and transpose
+  // 00 01 02 03 20 21 22 23
+  // 10 11 12 13 30 31 32 33
+  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
+  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+
+  // 00 10 01 11 02 12 03 13
+  // 20 30 21 31 22 32 23 33
+  res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1);
+
+  // 00 10 20 30 01 11 21 31
+  // 02 12 22 32 03 13 23 33
+  // only use the first 4 16-bit integers
+  res[1] = _mm_unpackhi_epi64(res[0], res[0]);
+  res[3] = _mm_unpackhi_epi64(res[2], res[2]);
+}
+
+static void fdct4_sse2(__m128i *in) {
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+  __m128i u[4], v[4];
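+  // Interleaving rows pairwise lets _mm_madd_epi16 evaluate both outputs of
+  // a 2-point butterfly at once: each 32-bit lane receives a*c0 + b*c1, with
+  // the (c0, c1) pair packed by pair_set_epi16 above.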
+  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+  u[1] = _mm_unpacklo_epi16(in[3], in[2]);
+
+  v[0] = _mm_add_epi16(u[0], u[1]);
+  v[1] = _mm_sub_epi16(u[0], u[1]);
+
+  u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);  // 0
+  u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16);  // 2
+  u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24);  // 1
+  u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08);  // 3
+
+  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(u[0], u[1]);
+  in[1] = _mm_packs_epi32(u[2], u[3]);
+  transpose_4x4(in);
+}
+
+static void fadst4_sse2(__m128i *in) {
+  const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9);
+  const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9);
+  const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9);
+  const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9);
+  const __m128i k__sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi_3_9);
+  const __m128i kZero = _mm_set1_epi16(0);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  __m128i u[8], v[8];
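+  // The 4-point ADST needs sinpi_3_9 * (x0 + x1 - x3); computing
+  // in7 = in[0] + in[1] up front lets that term reuse the same madd path.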
+  __m128i in7 = _mm_add_epi16(in[0], in[1]);
+
+  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+  u[1] = _mm_unpacklo_epi16(in[2], in[3]);
+  u[2] = _mm_unpacklo_epi16(in7, kZero);
+  u[3] = _mm_unpacklo_epi16(in[2], kZero);
+  u[4] = _mm_unpacklo_epi16(in[3], kZero);
+
+  v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02);  // s0 + s2
+  v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04);  // s4 + s5
+  v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03);  // x1
+  v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01);  // s1 - s3
+  v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02);  // -s4 + s6
+  v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03);  // s4
+  v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03);
+
+  u[0] = _mm_add_epi32(v[0], v[1]);
+  u[1] = _mm_sub_epi32(v[2], v[6]);
+  u[2] = _mm_add_epi32(v[3], v[4]);
+  u[3] = _mm_sub_epi32(u[2], u[0]);
+  u[4] = _mm_slli_epi32(v[5], 2);
+  u[5] = _mm_sub_epi32(u[4], v[5]);
+  u[6] = _mm_add_epi32(u[3], u[5]);
+
+  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+
+  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(u[0], u[2]);
+  in[1] = _mm_packs_epi32(u[1], u[3]);
+  transpose_4x4(in);
+}
+
+#if CONFIG_EXT_TX
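+// 1-D identity transform for the 4x4 case: each coefficient is multiplied by
+// the fixed-point Sqrt2 constant and rounded with the usual DCT_CONST shift;
+// the final transpose keeps the row/column pass structure uniform with
+// fdct4_sse2 and fadst4_sse2.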
+static void fidtx4_sse2(__m128i *in) {
+  const __m128i k__zero_epi16 = _mm_set1_epi16((int16_t)0);
+  const __m128i k__sqrt2_epi16 = _mm_set1_epi16((int16_t)Sqrt2);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+  __m128i v0, v1, v2, v3;
+  __m128i u0, u1, u2, u3;
+
+  v0 = _mm_unpacklo_epi16(in[0], k__zero_epi16);
+  v1 = _mm_unpacklo_epi16(in[1], k__zero_epi16);
+  v2 = _mm_unpacklo_epi16(in[2], k__zero_epi16);
+  v3 = _mm_unpacklo_epi16(in[3], k__zero_epi16);
+
+  u0 = _mm_madd_epi16(v0, k__sqrt2_epi16);
+  u1 = _mm_madd_epi16(v1, k__sqrt2_epi16);
+  u2 = _mm_madd_epi16(v2, k__sqrt2_epi16);
+  u3 = _mm_madd_epi16(v3, k__sqrt2_epi16);
+
+  v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+  v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+  v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+  v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+
+  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(u0, u2);
+  in[1] = _mm_packs_epi32(u1, u3);
+  transpose_4x4(in);
+}
+#endif  // CONFIG_EXT_TX
+
+void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+                      int tx_type) {
+  __m128i in[4];
+
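+  // Each case composes two 1-D transforms. Since every 1-D helper ends with
+  // a transpose, the first transform applies to columns and the second to
+  // rows; the flipud/fliplr flags to load_buffer_4x4 mirror the input to
+  // realize the FLIPADST variants.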
+  switch (tx_type) {
+    case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
+    case ADST_DCT:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fdct4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case DCT_ADST:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case ADST_ADST:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_4x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fdct4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_4x4(input, in, stride, 0, 1);
+      fdct4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_4x4(input, in, stride, 1, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_4x4(input, in, stride, 0, 1);
+      fadst4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_4x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case V_DCT:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fdct4_sse2(in);
+      fidtx4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case H_DCT:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fdct4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case V_ADST:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fadst4_sse2(in);
+      fidtx4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case H_ADST:
+      load_buffer_4x4(input, in, stride, 0, 0);
+      fidtx4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case V_FLIPADST:
+      load_buffer_4x4(input, in, stride, 1, 0);
+      fadst4_sse2(in);
+      fidtx4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+    case H_FLIPADST:
+      load_buffer_4x4(input, in, stride, 0, 1);
+      fidtx4_sse2(in);
+      fadst4_sse2(in);
+      write_buffer_4x4(output, in);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+void vp10_fdct8x8_quant_sse2(
+    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  __m128i zero;
+  int pass;
+  // Constants
+  //    In one case all eight 16-bit lanes hold the same constant; in all
+  //    others a pair of constants is repeated four times, built as the
+  //    32-bit value corresponding to that pair.
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  // Load input
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  __m128i *in[8];
+  int index = 0;
+
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)coeff_ptr;
+
+  // Pre-condition input (shift by two)
+  in0 = _mm_slli_epi16(in0, 2);
+  in1 = _mm_slli_epi16(in1, 2);
+  in2 = _mm_slli_epi16(in2, 2);
+  in3 = _mm_slli_epi16(in3, 2);
+  in4 = _mm_slli_epi16(in4, 2);
+  in5 = _mm_slli_epi16(in5, 2);
+  in6 = _mm_slli_epi16(in6, 2);
+  in7 = _mm_slli_epi16(in7, 2);
+
+  in[0] = &in0;
+  in[1] = &in1;
+  in[2] = &in2;
+  in[3] = &in3;
+  in[4] = &in4;
+  in[5] = &in5;
+  in[6] = &in6;
+  in[7] = &in7;
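+  // Keep pointers to the eight coefficient rows so the quantization loops
+  // below can walk them two registers (16 coefficients) at a time.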
+
+  // We do two passes, first the columns, then the rows. The results of the
+  // first pass are transposed so that the same column code can be reused. The
+  // results of the second pass are also transposed so that the rows (processed
+  // as columns) are put back in row positions.
+  for (pass = 0; pass < 2; pass++) {
+    // To store results of each pass before the transpose.
+    __m128i res0, res1, res2, res3, res4, res5, res6, res7;
+    // Add/subtract
+    const __m128i q0 = _mm_add_epi16(in0, in7);
+    const __m128i q1 = _mm_add_epi16(in1, in6);
+    const __m128i q2 = _mm_add_epi16(in2, in5);
+    const __m128i q3 = _mm_add_epi16(in3, in4);
+    const __m128i q4 = _mm_sub_epi16(in3, in4);
+    const __m128i q5 = _mm_sub_epi16(in2, in5);
+    const __m128i q6 = _mm_sub_epi16(in1, in6);
+    const __m128i q7 = _mm_sub_epi16(in0, in7);
+    // Work on first four results
+    {
+      // Add/subtract
+      const __m128i r0 = _mm_add_epi16(q0, q3);
+      const __m128i r1 = _mm_add_epi16(q1, q2);
+      const __m128i r2 = _mm_sub_epi16(q1, q2);
+      const __m128i r3 = _mm_sub_epi16(q0, q3);
+      // Interleave to do the multiply by constants, which gets us into 32 bits
+      const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+      const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+      const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+      const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+      // dct_const_round_shift
+      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+      // Combine
+      res0 = _mm_packs_epi32(w0, w1);
+      res4 = _mm_packs_epi32(w2, w3);
+      res2 = _mm_packs_epi32(w4, w5);
+      res6 = _mm_packs_epi32(w6, w7);
+    }
+    // Work on next four results
+    {
+      // Interleave to do the multiply by constants, which gets us into 32 bits
+      const __m128i d0 = _mm_unpacklo_epi16(q6, q5);
+      const __m128i d1 = _mm_unpackhi_epi16(q6, q5);
+      const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16);
+      const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16);
+      const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16);
+      const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16);
+      // dct_const_round_shift
+      const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING);
+      const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING);
+      const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING);
+      const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING);
+      const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS);
+      const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS);
+      const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS);
+      const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS);
+      // Combine
+      const __m128i r0 = _mm_packs_epi32(s0, s1);
+      const __m128i r1 = _mm_packs_epi32(s2, s3);
+      // Add/subtract
+      const __m128i x0 = _mm_add_epi16(q4, r0);
+      const __m128i x1 = _mm_sub_epi16(q4, r0);
+      const __m128i x2 = _mm_sub_epi16(q7, r1);
+      const __m128i x3 = _mm_add_epi16(q7, r1);
+      // Interleave to do the multiply by constants, which gets us into 32 bits
+      const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+      const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+      const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+      const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+      // dct_const_round_shift
+      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+      // Combine
+      res1 = _mm_packs_epi32(w0, w1);
+      res7 = _mm_packs_epi32(w2, w3);
+      res5 = _mm_packs_epi32(w4, w5);
+      res3 = _mm_packs_epi32(w6, w7);
+    }
+    // Transpose the 8x8.
+    {
+      // 00 01 02 03 04 05 06 07
+      // 10 11 12 13 14 15 16 17
+      // 20 21 22 23 24 25 26 27
+      // 30 31 32 33 34 35 36 37
+      // 40 41 42 43 44 45 46 47
+      // 50 51 52 53 54 55 56 57
+      // 60 61 62 63 64 65 66 67
+      // 70 71 72 73 74 75 76 77
+      const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+      const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
+      const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
+      const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
+      const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
+      const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
+      const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
+      const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
+      // 00 10 01 11 02 12 03 13
+      // 20 30 21 31 22 32 23 33
+      // 04 14 05 15 06 16 07 17
+      // 24 34 25 35 26 36 27 37
+      // 40 50 41 51 42 52 43 53
+      // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+      // 64 74 65 75 66 76 67 77
+      const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+      const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+      const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+      const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+      const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+      const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+      const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+      const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+      // 00 10 20 30 01 11 21 31
+      // 40 50 60 70 41 51 61 71
+      // 02 12 22 32 03 13 23 33
+      // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+      // 06 16 26 36 07 17 27 37
+      // 46 56 66 76 47 57 67 77
+      in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+      in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+      in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+      in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+      in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+      in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+      in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+      in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+      // 00 10 20 30 40 50 60 70
+      // 01 11 21 31 41 51 61 71
+      // 02 12 22 32 42 52 62 72
+      // 03 13 23 33 43 53 63 73
+      // 04 14 24 34 44 54 64 74
+      // 05 15 25 35 45 55 65 75
+      // 06 16 26 36 46 56 66 76
+      // 07 17 27 37 47 57 67 77
+    }
+  }
+  // Post-condition output (division by two); the quantization loop below
+  // stores it
+  {
+    // Post-condition (division by two)
+    //    halving of 16-bit signed numbers using shifts:
+    //    n / 2 = (n - (n >> 15)) >> 1
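+    //    (n >> 15 is -1 for negative n, so the halving rounds toward zero)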
+    const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
+    const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
+    const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
+    const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
+    const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
+    const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
+    const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
+    const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
+    in0 = _mm_sub_epi16(in0, sign_in0);
+    in1 = _mm_sub_epi16(in1, sign_in1);
+    in2 = _mm_sub_epi16(in2, sign_in2);
+    in3 = _mm_sub_epi16(in3, sign_in3);
+    in4 = _mm_sub_epi16(in4, sign_in4);
+    in5 = _mm_sub_epi16(in5, sign_in5);
+    in6 = _mm_sub_epi16(in6, sign_in6);
+    in7 = _mm_sub_epi16(in7, sign_in7);
+    in0 = _mm_srai_epi16(in0, 1);
+    in1 = _mm_srai_epi16(in1, 1);
+    in2 = _mm_srai_epi16(in2, 1);
+    in3 = _mm_srai_epi16(in3, 1);
+    in4 = _mm_srai_epi16(in4, 1);
+    in5 = _mm_srai_epi16(in5, 1);
+    in6 = _mm_srai_epi16(in6, 1);
+    in7 = _mm_srai_epi16(in7, 1);
+  }
+
+  iscan_ptr += n_coeffs;
+  qcoeff_ptr += n_coeffs;
+  dqcoeff_ptr += n_coeffs;
+  n_coeffs = -n_coeffs;
+  zero = _mm_setzero_si128();
+
+  if (!skip_block) {
+    __m128i eob;
+    __m128i round, quant, dequant;
+    {
+      __m128i coeff0, coeff1;
+
+      // Setup global values
+      {
+        round = _mm_load_si128((const __m128i *)round_ptr);
+        quant = _mm_load_si128((const __m128i *)quant_ptr);
+        dequant = _mm_load_si128((const __m128i *)dequant_ptr);
+      }
+
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+        // Do DC and first 15 AC
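+        // Quantize as q = ((|coeff| + round) * quant) >> 16 via mulhi, then
+        // restore the sign with the xor/sub pairs below.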
+        coeff0 = *in[0];
+        coeff1 = *in[1];
+
+        // Poor man's sign extract
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+        round = _mm_unpackhi_epi64(round, round);
+        qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+        qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+        quant = _mm_unpackhi_epi64(quant, quant);
+        qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+        // Reinsert signs
+        qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+        coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+        dequant = _mm_unpackhi_epi64(dequant, dequant);
+        coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+      }
+
+      {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob = _mm_max_epi16(eob, eob1);
+      }
+      n_coeffs += 8 * 2;
+    }
+
+    // AC only loop
+    index = 2;
+    while (n_coeffs < 0) {
+      __m128i coeff0, coeff1;
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+
+        assert(index < (int)(sizeof(in) / sizeof(in[0])) - 1);
+        coeff0 = *in[index];
+        coeff1 = *in[index + 1];
+
+        // Poor man's sign extract
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+        qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+        qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+        qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+        // Reinsert signs
+        qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+        coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+        coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+      }
+
+      {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob0, eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob0 = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob0 = _mm_max_epi16(eob0, eob1);
+        eob = _mm_max_epi16(eob, eob0);
+      }
+      n_coeffs += 8 * 2;
+      index += 2;
+    }
+
+    // Accumulate EOB
+    {
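+      // Horizontal max across the eight 16-bit lanes via three shuffle/max
+      // steps; the result lands in lane 1, which is extracted below.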
+      __m128i eob_shuffled;
+      eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      *eob_ptr = _mm_extract_epi16(eob, 1);
+    }
+  } else {
+    do {
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
+      n_coeffs += 8 * 2;
+    } while (n_coeffs < 0);
+    *eob_ptr = 0;
+  }
+}
+
+// load 8x8 array
+static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr) {
+  if (!flipud) {
+    in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  } else {
+    in[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = mm_reverse_epi16(in[0]);
+    in[1] = mm_reverse_epi16(in[1]);
+    in[2] = mm_reverse_epi16(in[2]);
+    in[3] = mm_reverse_epi16(in[3]);
+    in[4] = mm_reverse_epi16(in[4]);
+    in[5] = mm_reverse_epi16(in[5]);
+    in[6] = mm_reverse_epi16(in[6]);
+    in[7] = mm_reverse_epi16(in[7]);
+  }
+
+  in[0] = _mm_slli_epi16(in[0], 2);
+  in[1] = _mm_slli_epi16(in[1], 2);
+  in[2] = _mm_slli_epi16(in[2], 2);
+  in[3] = _mm_slli_epi16(in[3], 2);
+  in[4] = _mm_slli_epi16(in[4], 2);
+  in[5] = _mm_slli_epi16(in[5], 2);
+  in[6] = _mm_slli_epi16(in[6], 2);
+  in[7] = _mm_slli_epi16(in[7], 2);
+}
+
+// right shift and rounding
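+// For bit == 2 this computes (x + 1 + (x < 0)) >> 2 and for bit == 1 it
+// computes (x + (x < 0)) >> 1, the same rounded divisions used by the C
+// reference transforms.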
+static INLINE void right_shift_8x8(__m128i *res, const int bit) {
+  __m128i sign0 = _mm_srai_epi16(res[0], 15);
+  __m128i sign1 = _mm_srai_epi16(res[1], 15);
+  __m128i sign2 = _mm_srai_epi16(res[2], 15);
+  __m128i sign3 = _mm_srai_epi16(res[3], 15);
+  __m128i sign4 = _mm_srai_epi16(res[4], 15);
+  __m128i sign5 = _mm_srai_epi16(res[5], 15);
+  __m128i sign6 = _mm_srai_epi16(res[6], 15);
+  __m128i sign7 = _mm_srai_epi16(res[7], 15);
+
+  if (bit == 2) {
+    const __m128i const_rounding = _mm_set1_epi16(1);
+    res[0] = _mm_add_epi16(res[0], const_rounding);
+    res[1] = _mm_add_epi16(res[1], const_rounding);
+    res[2] = _mm_add_epi16(res[2], const_rounding);
+    res[3] = _mm_add_epi16(res[3], const_rounding);
+    res[4] = _mm_add_epi16(res[4], const_rounding);
+    res[5] = _mm_add_epi16(res[5], const_rounding);
+    res[6] = _mm_add_epi16(res[6], const_rounding);
+    res[7] = _mm_add_epi16(res[7], const_rounding);
+  }
+
+  res[0] = _mm_sub_epi16(res[0], sign0);
+  res[1] = _mm_sub_epi16(res[1], sign1);
+  res[2] = _mm_sub_epi16(res[2], sign2);
+  res[3] = _mm_sub_epi16(res[3], sign3);
+  res[4] = _mm_sub_epi16(res[4], sign4);
+  res[5] = _mm_sub_epi16(res[5], sign5);
+  res[6] = _mm_sub_epi16(res[6], sign6);
+  res[7] = _mm_sub_epi16(res[7], sign7);
+
+  if (bit == 1) {
+    res[0] = _mm_srai_epi16(res[0], 1);
+    res[1] = _mm_srai_epi16(res[1], 1);
+    res[2] = _mm_srai_epi16(res[2], 1);
+    res[3] = _mm_srai_epi16(res[3], 1);
+    res[4] = _mm_srai_epi16(res[4], 1);
+    res[5] = _mm_srai_epi16(res[5], 1);
+    res[6] = _mm_srai_epi16(res[6], 1);
+    res[7] = _mm_srai_epi16(res[7], 1);
+  } else {
+    res[0] = _mm_srai_epi16(res[0], 2);
+    res[1] = _mm_srai_epi16(res[1], 2);
+    res[2] = _mm_srai_epi16(res[2], 2);
+    res[3] = _mm_srai_epi16(res[3], 2);
+    res[4] = _mm_srai_epi16(res[4], 2);
+    res[5] = _mm_srai_epi16(res[5], 2);
+    res[6] = _mm_srai_epi16(res[6], 2);
+    res[7] = _mm_srai_epi16(res[7], 2);
+  }
+}
+
+// write 8x8 array
+static INLINE void write_buffer_8x8(tran_low_t *output, __m128i *res,
+                                    int stride) {
+  store_output(&res[0], (output + 0 * stride));
+  store_output(&res[1], (output + 1 * stride));
+  store_output(&res[2], (output + 2 * stride));
+  store_output(&res[3], (output + 3 * stride));
+  store_output(&res[4], (output + 4 * stride));
+  store_output(&res[5], (output + 5 * stride));
+  store_output(&res[6], (output + 6 * stride));
+  store_output(&res[7], (output + 7 * stride));
+}
+
+// perform in-place transpose
+static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
+  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
+  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
+  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
+  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
+  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+  // 00 10 01 11 02 12 03 13
+  // 20 30 21 31 22 32 23 33
+  // 04 14 05 15 06 16 07 17
+  // 24 34 25 35 26 36 27 37
+  // 40 50 41 51 42 52 43 53
+  // 60 70 61 71 62 72 63 73
+  // 44 54 45 55 46 56 47 57
+  // 64 74 65 75 66 76 67 77
+  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+  // 00 10 20 30 01 11 21 31
+  // 40 50 60 70 41 51 61 71
+  // 02 12 22 32 03 13 23 33
+  // 42 52 62 72 43 53 63 73
+  // 04 14 24 34 05 15 25 35
+  // 44 54 64 74 45 55 65 75
+  // 06 16 26 36 07 17 27 37
+  // 46 56 66 76 47 57 67 77
+  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
+  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
+  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
+  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
+  // 00 10 20 30 40 50 60 70
+  // 01 11 21 31 41 51 61 71
+  // 02 12 22 32 42 52 62 72
+  // 03 13 23 33 43 53 63 73
+  // 04 14 24 34 44 54 64 74
+  // 05 15 25 35 45 55 65 75
+  // 06 16 26 36 46 56 66 76
+  // 07 17 27 37 47 57 67 77
+}
+
+static void fdct8_sse2(__m128i *in) {
+  // constants
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+  __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+
+  // stage 1
+  s0 = _mm_add_epi16(in[0], in[7]);
+  s1 = _mm_add_epi16(in[1], in[6]);
+  s2 = _mm_add_epi16(in[2], in[5]);
+  s3 = _mm_add_epi16(in[3], in[4]);
+  s4 = _mm_sub_epi16(in[3], in[4]);
+  s5 = _mm_sub_epi16(in[2], in[5]);
+  s6 = _mm_sub_epi16(in[1], in[6]);
+  s7 = _mm_sub_epi16(in[0], in[7]);
+
+  u0 = _mm_add_epi16(s0, s3);
+  u1 = _mm_add_epi16(s1, s2);
+  u2 = _mm_sub_epi16(s1, s2);
+  u3 = _mm_sub_epi16(s0, s3);
+  // interleave and perform butterfly multiplication/addition
+  v0 = _mm_unpacklo_epi16(u0, u1);
+  v1 = _mm_unpackhi_epi16(u0, u1);
+  v2 = _mm_unpacklo_epi16(u2, u3);
+  v3 = _mm_unpackhi_epi16(u2, u3);
+
+  u0 = _mm_madd_epi16(v0, k__cospi_p16_p16);
+  u1 = _mm_madd_epi16(v1, k__cospi_p16_p16);
+  u2 = _mm_madd_epi16(v0, k__cospi_p16_m16);
+  u3 = _mm_madd_epi16(v1, k__cospi_p16_m16);
+  u4 = _mm_madd_epi16(v2, k__cospi_p24_p08);
+  u5 = _mm_madd_epi16(v3, k__cospi_p24_p08);
+  u6 = _mm_madd_epi16(v2, k__cospi_m08_p24);
+  u7 = _mm_madd_epi16(v3, k__cospi_m08_p24);
+
+  // shift and rounding
+  v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+  v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+  v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+  v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+  v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+  v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+  v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+  v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+
+  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(u0, u1);
+  in[2] = _mm_packs_epi32(u4, u5);
+  in[4] = _mm_packs_epi32(u2, u3);
+  in[6] = _mm_packs_epi32(u6, u7);
+
+  // stage 2
+  // interleave and perform butterfly multiplication/addition
+  u0 = _mm_unpacklo_epi16(s6, s5);
+  u1 = _mm_unpackhi_epi16(s6, s5);
+  v0 = _mm_madd_epi16(u0, k__cospi_p16_m16);
+  v1 = _mm_madd_epi16(u1, k__cospi_p16_m16);
+  v2 = _mm_madd_epi16(u0, k__cospi_p16_p16);
+  v3 = _mm_madd_epi16(u1, k__cospi_p16_p16);
+
+  // shift and rounding
+  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+
+  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+
+  u0 = _mm_packs_epi32(v0, v1);
+  u1 = _mm_packs_epi32(v2, v3);
+
+  // stage 3
+  s0 = _mm_add_epi16(s4, u0);
+  s1 = _mm_sub_epi16(s4, u0);
+  s2 = _mm_sub_epi16(s7, u1);
+  s3 = _mm_add_epi16(s7, u1);
+
+  // stage 4
+  u0 = _mm_unpacklo_epi16(s0, s3);
+  u1 = _mm_unpackhi_epi16(s0, s3);
+  u2 = _mm_unpacklo_epi16(s1, s2);
+  u3 = _mm_unpackhi_epi16(s1, s2);
+
+  v0 = _mm_madd_epi16(u0, k__cospi_p28_p04);
+  v1 = _mm_madd_epi16(u1, k__cospi_p28_p04);
+  v2 = _mm_madd_epi16(u2, k__cospi_p12_p20);
+  v3 = _mm_madd_epi16(u3, k__cospi_p12_p20);
+  v4 = _mm_madd_epi16(u2, k__cospi_m20_p12);
+  v5 = _mm_madd_epi16(u3, k__cospi_m20_p12);
+  v6 = _mm_madd_epi16(u0, k__cospi_m04_p28);
+  v7 = _mm_madd_epi16(u1, k__cospi_m04_p28);
+
+  // shift and rounding
+  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+  u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+  u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+  u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+  u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+  v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+  v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+  v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+  v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+  in[1] = _mm_packs_epi32(v0, v1);
+  in[3] = _mm_packs_epi32(v4, v5);
+  in[5] = _mm_packs_epi32(v2, v3);
+  in[7] = _mm_packs_epi32(v6, v7);
+
+  // transpose
+  array_transpose_8x8(in, in);
+}
+
+static void fadst8_sse2(__m128i *in) {
+  // Constants
+  const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
+  const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
+  const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
+  const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
+  const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
+  const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
+  const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
+  const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
+  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__const_0 = _mm_set1_epi16(0);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+  __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15;
+  __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
+  __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
+  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+
+  // properly aligned for butterfly input
+  in0 = in[7];
+  in1 = in[0];
+  in2 = in[5];
+  in3 = in[2];
+  in4 = in[3];
+  in5 = in[4];
+  in6 = in[1];
+  in7 = in[6];
+
+  // column transformation
+  // stage 1
+  // interleave and multiply/add into 32-bit integer
+  s0 = _mm_unpacklo_epi16(in0, in1);
+  s1 = _mm_unpackhi_epi16(in0, in1);
+  s2 = _mm_unpacklo_epi16(in2, in3);
+  s3 = _mm_unpackhi_epi16(in2, in3);
+  s4 = _mm_unpacklo_epi16(in4, in5);
+  s5 = _mm_unpackhi_epi16(in4, in5);
+  s6 = _mm_unpacklo_epi16(in6, in7);
+  s7 = _mm_unpackhi_epi16(in6, in7);
+
+  u0 = _mm_madd_epi16(s0, k__cospi_p02_p30);
+  u1 = _mm_madd_epi16(s1, k__cospi_p02_p30);
+  u2 = _mm_madd_epi16(s0, k__cospi_p30_m02);
+  u3 = _mm_madd_epi16(s1, k__cospi_p30_m02);
+  u4 = _mm_madd_epi16(s2, k__cospi_p10_p22);
+  u5 = _mm_madd_epi16(s3, k__cospi_p10_p22);
+  u6 = _mm_madd_epi16(s2, k__cospi_p22_m10);
+  u7 = _mm_madd_epi16(s3, k__cospi_p22_m10);
+  u8 = _mm_madd_epi16(s4, k__cospi_p18_p14);
+  u9 = _mm_madd_epi16(s5, k__cospi_p18_p14);
+  u10 = _mm_madd_epi16(s4, k__cospi_p14_m18);
+  u11 = _mm_madd_epi16(s5, k__cospi_p14_m18);
+  u12 = _mm_madd_epi16(s6, k__cospi_p26_p06);
+  u13 = _mm_madd_epi16(s7, k__cospi_p26_p06);
+  u14 = _mm_madd_epi16(s6, k__cospi_p06_m26);
+  u15 = _mm_madd_epi16(s7, k__cospi_p06_m26);
+
+  // addition
+  w0 = _mm_add_epi32(u0, u8);
+  w1 = _mm_add_epi32(u1, u9);
+  w2 = _mm_add_epi32(u2, u10);
+  w3 = _mm_add_epi32(u3, u11);
+  w4 = _mm_add_epi32(u4, u12);
+  w5 = _mm_add_epi32(u5, u13);
+  w6 = _mm_add_epi32(u6, u14);
+  w7 = _mm_add_epi32(u7, u15);
+  w8 = _mm_sub_epi32(u0, u8);
+  w9 = _mm_sub_epi32(u1, u9);
+  w10 = _mm_sub_epi32(u2, u10);
+  w11 = _mm_sub_epi32(u3, u11);
+  w12 = _mm_sub_epi32(u4, u12);
+  w13 = _mm_sub_epi32(u5, u13);
+  w14 = _mm_sub_epi32(u6, u14);
+  w15 = _mm_sub_epi32(u7, u15);
+
+  // shift and rounding
+  v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+  v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+  v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+  v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+  v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+  v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+  v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+  v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+  v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING);
+  v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING);
+  v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING);
+  v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING);
+  v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING);
+  v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING);
+  v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING);
+  v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING);
+
+  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+  u8 = _mm_srai_epi32(v8, DCT_CONST_BITS);
+  u9 = _mm_srai_epi32(v9, DCT_CONST_BITS);
+  u10 = _mm_srai_epi32(v10, DCT_CONST_BITS);
+  u11 = _mm_srai_epi32(v11, DCT_CONST_BITS);
+  u12 = _mm_srai_epi32(v12, DCT_CONST_BITS);
+  u13 = _mm_srai_epi32(v13, DCT_CONST_BITS);
+  u14 = _mm_srai_epi32(v14, DCT_CONST_BITS);
+  u15 = _mm_srai_epi32(v15, DCT_CONST_BITS);
+
+  // back to 16-bit and pack 8 integers into __m128i
+  in[0] = _mm_packs_epi32(u0, u1);
+  in[1] = _mm_packs_epi32(u2, u3);
+  in[2] = _mm_packs_epi32(u4, u5);
+  in[3] = _mm_packs_epi32(u6, u7);
+  in[4] = _mm_packs_epi32(u8, u9);
+  in[5] = _mm_packs_epi32(u10, u11);
+  in[6] = _mm_packs_epi32(u12, u13);
+  in[7] = _mm_packs_epi32(u14, u15);
+
+  // stage 2
+  s0 = _mm_add_epi16(in[0], in[2]);
+  s1 = _mm_add_epi16(in[1], in[3]);
+  s2 = _mm_sub_epi16(in[0], in[2]);
+  s3 = _mm_sub_epi16(in[1], in[3]);
+  u0 = _mm_unpacklo_epi16(in[4], in[5]);
+  u1 = _mm_unpackhi_epi16(in[4], in[5]);
+  u2 = _mm_unpacklo_epi16(in[6], in[7]);
+  u3 = _mm_unpackhi_epi16(in[6], in[7]);
+
+  v0 = _mm_madd_epi16(u0, k__cospi_p08_p24);
+  v1 = _mm_madd_epi16(u1, k__cospi_p08_p24);
+  v2 = _mm_madd_epi16(u0, k__cospi_p24_m08);
+  v3 = _mm_madd_epi16(u1, k__cospi_p24_m08);
+  v4 = _mm_madd_epi16(u2, k__cospi_m24_p08);
+  v5 = _mm_madd_epi16(u3, k__cospi_m24_p08);
+  v6 = _mm_madd_epi16(u2, k__cospi_p08_p24);
+  v7 = _mm_madd_epi16(u3, k__cospi_p08_p24);
+
+  w0 = _mm_add_epi32(v0, v4);
+  w1 = _mm_add_epi32(v1, v5);
+  w2 = _mm_add_epi32(v2, v6);
+  w3 = _mm_add_epi32(v3, v7);
+  w4 = _mm_sub_epi32(v0, v4);
+  w5 = _mm_sub_epi32(v1, v5);
+  w6 = _mm_sub_epi32(v2, v6);
+  w7 = _mm_sub_epi32(v3, v7);
+
+  v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING);
+  v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING);
+  v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING);
+  v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING);
+  v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING);
+  v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING);
+  v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING);
+  v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING);
+
+  u0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+  u1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+  u2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+  u3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+  u4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+  u5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+  u6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+  u7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+  // back to 16-bit integers
+  s4 = _mm_packs_epi32(u0, u1);
+  s5 = _mm_packs_epi32(u2, u3);
+  s6 = _mm_packs_epi32(u4, u5);
+  s7 = _mm_packs_epi32(u6, u7);
+
+  // stage 3
+  u0 = _mm_unpacklo_epi16(s2, s3);
+  u1 = _mm_unpackhi_epi16(s2, s3);
+  u2 = _mm_unpacklo_epi16(s6, s7);
+  u3 = _mm_unpackhi_epi16(s6, s7);
+
+  v0 = _mm_madd_epi16(u0, k__cospi_p16_p16);
+  v1 = _mm_madd_epi16(u1, k__cospi_p16_p16);
+  v2 = _mm_madd_epi16(u0, k__cospi_p16_m16);
+  v3 = _mm_madd_epi16(u1, k__cospi_p16_m16);
+  v4 = _mm_madd_epi16(u2, k__cospi_p16_p16);
+  v5 = _mm_madd_epi16(u3, k__cospi_p16_p16);
+  v6 = _mm_madd_epi16(u2, k__cospi_p16_m16);
+  v7 = _mm_madd_epi16(u3, k__cospi_p16_m16);
+
+  u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+  u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+  u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+  u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+  u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+  u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+  u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+  u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+  v0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+  v1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+  v2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+  v3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+  v4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+  v5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+  v6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+  v7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+  s2 = _mm_packs_epi32(v0, v1);
+  s3 = _mm_packs_epi32(v2, v3);
+  s6 = _mm_packs_epi32(v4, v5);
+  s7 = _mm_packs_epi32(v6, v7);
+
+  // FIXME(jingning): do subtract using bit inversion?
+  in[0] = s0;
+  in[1] = _mm_sub_epi16(k__const_0, s4);
+  in[2] = s6;
+  in[3] = _mm_sub_epi16(k__const_0, s2);
+  in[4] = s3;
+  in[5] = _mm_sub_epi16(k__const_0, s7);
+  in[6] = s5;
+  in[7] = _mm_sub_epi16(k__const_0, s1);
+
+  // transpose
+  array_transpose_8x8(in, in);
+}
+
+#if CONFIG_EXT_TX
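+// fidtx8: 8x8 identity "transform" used by the V_*/H_* ext-tx types. The
+// 8-point 1-D identity pass simply doubles each coefficient (shift left
+// by 1), so no rounding stage is needed.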
+static void fidtx8_sse2(__m128i *in) {
+  in[0] = _mm_slli_epi16(in[0], 1);
+  in[1] = _mm_slli_epi16(in[1], 1);
+  in[2] = _mm_slli_epi16(in[2], 1);
+  in[3] = _mm_slli_epi16(in[3], 1);
+  in[4] = _mm_slli_epi16(in[4], 1);
+  in[5] = _mm_slli_epi16(in[5], 1);
+  in[6] = _mm_slli_epi16(in[6], 1);
+  in[7] = _mm_slli_epi16(in[7], 1);
+
+  array_transpose_8x8(in, in);
+}
+#endif  // CONFIG_EXT_TX
+
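+// Hybrid 8x8 forward transform. The tx_type name lists the vertical
+// (column) transform first and the horizontal (row) transform second;
+// each 1-D helper ends with a transpose, so the second call operates on
+// rows. DCT_DCT delegates to the standalone vpx_fdct8x8_sse2.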
+void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+                      int tx_type) {
+  __m128i in[8];
+
+  switch (tx_type) {
+    case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
+    case ADST_DCT:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      fdct8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case DCT_ADST:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case ADST_ADST:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_8x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      fdct8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_8x8(input, in, stride, 0, 1);
+      fdct8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_8x8(input, in, stride, 1, 1);
+      fadst8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_8x8(input, in, stride, 0, 1);
+      fadst8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_8x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case V_DCT:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fdct8_sse2(in);
+      fidtx8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case H_DCT:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      fdct8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case V_ADST:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fadst8_sse2(in);
+      fidtx8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case H_ADST:
+      load_buffer_8x8(input, in, stride, 0, 0);
+      fidtx8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case V_FLIPADST:
+      load_buffer_8x8(input, in, stride, 1, 0);
+      fadst8_sse2(in);
+      fidtx8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+    case H_FLIPADST:
+      load_buffer_8x8(input, in, stride, 0, 1);
+      fidtx8_sse2(in);
+      fadst8_sse2(in);
+      right_shift_8x8(in, 1);
+      write_buffer_8x8(output, in, 8);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+}
+
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *in0,
+                                     __m128i *in1, int stride, int flipud,
+                                     int fliplr) {
+  // Load 4 8x8 blocks
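+  // Flipping happens at two levels: the pointer swaps below reorder the
+  // four 8x8 blocks within the 16x16 tile, and load_buffer_8x8 then
+  // flips rows/columns inside each block.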
+  const int16_t *topL = input;
+  const int16_t *topR = input + 8;
+  const int16_t *botL = input + 8 * stride;
+  const int16_t *botR = input + 8 * stride + 8;
+
+  const int16_t *tmp;
+
+  if (flipud) {
+    // Swap the upper and lower blocks in the left half
+    tmp = topL;
+    topL = botL;
+    botL = tmp;
+    // Swap the upper and lower blocks in the right half
+    tmp = topR;
+    topR = botR;
+    botR = tmp;
+  }
+
+  if (fliplr) {
+    // Swap the left and right blocks in the top half
+    tmp = topL;
+    topL = topR;
+    topR = tmp;
+    // Swap the left and right blocks in the bottom half
+    tmp = botL;
+    botL = botR;
+    botR = tmp;
+  }
+
+  // load first 8 columns
+  load_buffer_8x8(topL, in0, stride, flipud, fliplr);
+  load_buffer_8x8(botL, in0 + 8, stride, flipud, fliplr);
+
+  // load second 8 columns
+  load_buffer_8x8(topR, in1, stride, flipud, fliplr);
+  load_buffer_8x8(botR, in1 + 8, stride, flipud, fliplr);
+}
+
+static INLINE void write_buffer_16x16(tran_low_t *output, __m128i *in0,
+                                      __m128i *in1, int stride) {
+  // write first 8 columns
+  write_buffer_8x8(output, in0, stride);
+  write_buffer_8x8(output + 8 * stride, in0 + 8, stride);
+  // write second 8 columns
+  output += 8;
+  write_buffer_8x8(output, in1, stride);
+  write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
+}
+
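+// Transpose a 16x16 tile stored as two groups of sixteen rows: res0 holds
+// the left eight columns, res1 the right eight. The transpose decomposes
+// into four 8x8 tile transposes; the diagonal tiles transpose in place
+// while the off-diagonal tiles transpose and swap positions, using tbuf
+// as scratch for the swap.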
+static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
+  __m128i tbuf[8];
+  array_transpose_8x8(res0, res0);
+  array_transpose_8x8(res1, tbuf);
+  array_transpose_8x8(res0 + 8, res1);
+  array_transpose_8x8(res1 + 8, res1 + 8);
+
+  res0[8] = tbuf[0];
+  res0[9] = tbuf[1];
+  res0[10] = tbuf[2];
+  res0[11] = tbuf[3];
+  res0[12] = tbuf[4];
+  res0[13] = tbuf[5];
+  res0[14] = tbuf[6];
+  res0[15] = tbuf[7];
+}
+
+static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) {
+  // perform rounding operations
+  right_shift_8x8(res0, 2);
+  right_shift_8x8(res0 + 8, 2);
+  right_shift_8x8(res1, 2);
+  right_shift_8x8(res1 + 8, 2);
+}
+
+static void fdct16_8col(__m128i *in) {
+  // perform 16x16 1-D DCT for 8 columns
+  __m128i i[8], s[8], p[8], t[8], u[16], v[16];
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64);
+  const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64);
+  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
+  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
+  const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64);
+  const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64);
+  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
+  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
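+  // Each rotation below follows the same fixed-point idiom: interleave two
+  // 16-bit rows with unpacklo/unpackhi, form 32-bit dot products against a
+  // (cos, sin) constant pair via _mm_madd_epi16, add DCT_CONST_ROUNDING
+  // (1 << (DCT_CONST_BITS - 1)), shift right by DCT_CONST_BITS, and pack
+  // back to 16 bits with saturation.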
+  // stage 1
+  i[0] = _mm_add_epi16(in[0], in[15]);
+  i[1] = _mm_add_epi16(in[1], in[14]);
+  i[2] = _mm_add_epi16(in[2], in[13]);
+  i[3] = _mm_add_epi16(in[3], in[12]);
+  i[4] = _mm_add_epi16(in[4], in[11]);
+  i[5] = _mm_add_epi16(in[5], in[10]);
+  i[6] = _mm_add_epi16(in[6], in[9]);
+  i[7] = _mm_add_epi16(in[7], in[8]);
+
+  s[0] = _mm_sub_epi16(in[7], in[8]);
+  s[1] = _mm_sub_epi16(in[6], in[9]);
+  s[2] = _mm_sub_epi16(in[5], in[10]);
+  s[3] = _mm_sub_epi16(in[4], in[11]);
+  s[4] = _mm_sub_epi16(in[3], in[12]);
+  s[5] = _mm_sub_epi16(in[2], in[13]);
+  s[6] = _mm_sub_epi16(in[1], in[14]);
+  s[7] = _mm_sub_epi16(in[0], in[15]);
+
+  p[0] = _mm_add_epi16(i[0], i[7]);
+  p[1] = _mm_add_epi16(i[1], i[6]);
+  p[2] = _mm_add_epi16(i[2], i[5]);
+  p[3] = _mm_add_epi16(i[3], i[4]);
+  p[4] = _mm_sub_epi16(i[3], i[4]);
+  p[5] = _mm_sub_epi16(i[2], i[5]);
+  p[6] = _mm_sub_epi16(i[1], i[6]);
+  p[7] = _mm_sub_epi16(i[0], i[7]);
+
+  u[0] = _mm_add_epi16(p[0], p[3]);
+  u[1] = _mm_add_epi16(p[1], p[2]);
+  u[2] = _mm_sub_epi16(p[1], p[2]);
+  u[3] = _mm_sub_epi16(p[0], p[3]);
+
+  v[0] = _mm_unpacklo_epi16(u[0], u[1]);
+  v[1] = _mm_unpackhi_epi16(u[0], u[1]);
+  v[2] = _mm_unpacklo_epi16(u[2], u[3]);
+  v[3] = _mm_unpackhi_epi16(u[2], u[3]);
+
+  u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16);
+  u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16);
+  u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16);
+  u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16);
+  u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08);
+  u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08);
+  u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24);
+  u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24);
+
+  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+
+  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(u[0], u[1]);
+  in[4] = _mm_packs_epi32(u[4], u[5]);
+  in[8] = _mm_packs_epi32(u[2], u[3]);
+  in[12] = _mm_packs_epi32(u[6], u[7]);
+
+  u[0] = _mm_unpacklo_epi16(p[5], p[6]);
+  u[1] = _mm_unpackhi_epi16(p[5], p[6]);
+  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+  v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+  v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+
+  u[0] = _mm_packs_epi32(v[0], v[1]);
+  u[1] = _mm_packs_epi32(v[2], v[3]);
+
+  t[0] = _mm_add_epi16(p[4], u[0]);
+  t[1] = _mm_sub_epi16(p[4], u[0]);
+  t[2] = _mm_sub_epi16(p[7], u[1]);
+  t[3] = _mm_add_epi16(p[7], u[1]);
+
+  u[0] = _mm_unpacklo_epi16(t[0], t[3]);
+  u[1] = _mm_unpackhi_epi16(t[0], t[3]);
+  u[2] = _mm_unpacklo_epi16(t[1], t[2]);
+  u[3] = _mm_unpackhi_epi16(t[1], t[2]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04);
+  v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20);
+  v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12);
+  v[6] = _mm_madd_epi16(u[0], k__cospi_m04_p28);
+  v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+  in[2] = _mm_packs_epi32(v[0], v[1]);
+  in[6] = _mm_packs_epi32(v[4], v[5]);
+  in[10] = _mm_packs_epi32(v[2], v[3]);
+  in[14] = _mm_packs_epi32(v[6], v[7]);
+
+  // stage 2
+  u[0] = _mm_unpacklo_epi16(s[2], s[5]);
+  u[1] = _mm_unpackhi_epi16(s[2], s[5]);
+  u[2] = _mm_unpacklo_epi16(s[3], s[4]);
+  u[3] = _mm_unpackhi_epi16(s[3], s[4]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16);
+  v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+  v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+  v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16);
+  v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+  t[2] = _mm_packs_epi32(v[0], v[1]);
+  t[3] = _mm_packs_epi32(v[2], v[3]);
+  t[4] = _mm_packs_epi32(v[4], v[5]);
+  t[5] = _mm_packs_epi32(v[6], v[7]);
+
+  // stage 3
+  p[0] = _mm_add_epi16(s[0], t[3]);
+  p[1] = _mm_add_epi16(s[1], t[2]);
+  p[2] = _mm_sub_epi16(s[1], t[2]);
+  p[3] = _mm_sub_epi16(s[0], t[3]);
+  p[4] = _mm_sub_epi16(s[7], t[4]);
+  p[5] = _mm_sub_epi16(s[6], t[5]);
+  p[6] = _mm_add_epi16(s[6], t[5]);
+  p[7] = _mm_add_epi16(s[7], t[4]);
+
+  // stage 4
+  u[0] = _mm_unpacklo_epi16(p[1], p[6]);
+  u[1] = _mm_unpackhi_epi16(p[1], p[6]);
+  u[2] = _mm_unpacklo_epi16(p[2], p[5]);
+  u[3] = _mm_unpackhi_epi16(p[2], p[5]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24);
+  v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08);
+  v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24);
+  v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08);
+  v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+
+  t[1] = _mm_packs_epi32(v[0], v[1]);
+  t[2] = _mm_packs_epi32(v[2], v[3]);
+  t[5] = _mm_packs_epi32(v[4], v[5]);
+  t[6] = _mm_packs_epi32(v[6], v[7]);
+
+  // stage 5
+  s[0] = _mm_add_epi16(p[0], t[1]);
+  s[1] = _mm_sub_epi16(p[0], t[1]);
+  s[2] = _mm_sub_epi16(p[3], t[2]);
+  s[3] = _mm_add_epi16(p[3], t[2]);
+  s[4] = _mm_add_epi16(p[4], t[5]);
+  s[5] = _mm_sub_epi16(p[4], t[5]);
+  s[6] = _mm_sub_epi16(p[7], t[6]);
+  s[7] = _mm_add_epi16(p[7], t[6]);
+
+  // stage 6
+  u[0] = _mm_unpacklo_epi16(s[0], s[7]);
+  u[1] = _mm_unpackhi_epi16(s[0], s[7]);
+  u[2] = _mm_unpacklo_epi16(s[1], s[6]);
+  u[3] = _mm_unpackhi_epi16(s[1], s[6]);
+  u[4] = _mm_unpacklo_epi16(s[2], s[5]);
+  u[5] = _mm_unpackhi_epi16(s[2], s[5]);
+  u[6] = _mm_unpacklo_epi16(s[3], s[4]);
+  u[7] = _mm_unpackhi_epi16(s[3], s[4]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02);
+  v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18);
+  v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18);
+  v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10);
+  v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10);
+  v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26);
+  v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26);
+  v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06);
+  v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06);
+  v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22);
+  v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22);
+  v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14);
+  v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14);
+  v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30);
+  v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+  u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+  u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+  u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+  u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+  u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+  u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+  u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+  u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+  in[1] = _mm_packs_epi32(v[0], v[1]);
+  in[9] = _mm_packs_epi32(v[2], v[3]);
+  in[5] = _mm_packs_epi32(v[4], v[5]);
+  in[13] = _mm_packs_epi32(v[6], v[7]);
+  in[3] = _mm_packs_epi32(v[8], v[9]);
+  in[11] = _mm_packs_epi32(v[10], v[11]);
+  in[7] = _mm_packs_epi32(v[12], v[13]);
+  in[15] = _mm_packs_epi32(v[14], v[15]);
+}
+
+static void fadst16_8col(__m128i *in) {
+  // perform 16x16 1-D ADST for 8 columns
+  __m128i s[16], x[16], u[32], v[32];
+  const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
+  const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
+  const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
+  const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
+  const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
+  const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
+  const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
+  const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
+  const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
+  const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
+  const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
+  const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
+  const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
+  const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
+  const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
+  const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
+  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
+  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
+  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
+  const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
+  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_m16_m16 = _mm_set1_epi16((int16_t)-cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  const __m128i kZero = _mm_set1_epi16(0);
+
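+  // stage 1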
+  u[0] = _mm_unpacklo_epi16(in[15], in[0]);
+  u[1] = _mm_unpackhi_epi16(in[15], in[0]);
+  u[2] = _mm_unpacklo_epi16(in[13], in[2]);
+  u[3] = _mm_unpackhi_epi16(in[13], in[2]);
+  u[4] = _mm_unpacklo_epi16(in[11], in[4]);
+  u[5] = _mm_unpackhi_epi16(in[11], in[4]);
+  u[6] = _mm_unpacklo_epi16(in[9], in[6]);
+  u[7] = _mm_unpackhi_epi16(in[9], in[6]);
+  u[8] = _mm_unpacklo_epi16(in[7], in[8]);
+  u[9] = _mm_unpackhi_epi16(in[7], in[8]);
+  u[10] = _mm_unpacklo_epi16(in[5], in[10]);
+  u[11] = _mm_unpackhi_epi16(in[5], in[10]);
+  u[12] = _mm_unpacklo_epi16(in[3], in[12]);
+  u[13] = _mm_unpackhi_epi16(in[3], in[12]);
+  u[14] = _mm_unpacklo_epi16(in[1], in[14]);
+  u[15] = _mm_unpackhi_epi16(in[1], in[14]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
+  v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
+  v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
+  v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
+  v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
+  v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
+  v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
+  v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
+  v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
+  v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
+  v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
+  v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
+  v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
+  v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
+  v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
+  v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
+  v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
+  v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
+  v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
+  v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
+  v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
+  v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
+  v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
+  v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
+  v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
+  v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
+  v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
+  v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
+  v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);
+
+  u[0] = _mm_add_epi32(v[0], v[16]);
+  u[1] = _mm_add_epi32(v[1], v[17]);
+  u[2] = _mm_add_epi32(v[2], v[18]);
+  u[3] = _mm_add_epi32(v[3], v[19]);
+  u[4] = _mm_add_epi32(v[4], v[20]);
+  u[5] = _mm_add_epi32(v[5], v[21]);
+  u[6] = _mm_add_epi32(v[6], v[22]);
+  u[7] = _mm_add_epi32(v[7], v[23]);
+  u[8] = _mm_add_epi32(v[8], v[24]);
+  u[9] = _mm_add_epi32(v[9], v[25]);
+  u[10] = _mm_add_epi32(v[10], v[26]);
+  u[11] = _mm_add_epi32(v[11], v[27]);
+  u[12] = _mm_add_epi32(v[12], v[28]);
+  u[13] = _mm_add_epi32(v[13], v[29]);
+  u[14] = _mm_add_epi32(v[14], v[30]);
+  u[15] = _mm_add_epi32(v[15], v[31]);
+  u[16] = _mm_sub_epi32(v[0], v[16]);
+  u[17] = _mm_sub_epi32(v[1], v[17]);
+  u[18] = _mm_sub_epi32(v[2], v[18]);
+  u[19] = _mm_sub_epi32(v[3], v[19]);
+  u[20] = _mm_sub_epi32(v[4], v[20]);
+  u[21] = _mm_sub_epi32(v[5], v[21]);
+  u[22] = _mm_sub_epi32(v[6], v[22]);
+  u[23] = _mm_sub_epi32(v[7], v[23]);
+  u[24] = _mm_sub_epi32(v[8], v[24]);
+  u[25] = _mm_sub_epi32(v[9], v[25]);
+  u[26] = _mm_sub_epi32(v[10], v[26]);
+  u[27] = _mm_sub_epi32(v[11], v[27]);
+  u[28] = _mm_sub_epi32(v[12], v[28]);
+  u[29] = _mm_sub_epi32(v[13], v[29]);
+  u[30] = _mm_sub_epi32(v[14], v[30]);
+  u[31] = _mm_sub_epi32(v[15], v[31]);
+
+  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+  v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+  v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+  v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+  v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+  v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+  v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+  v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+  v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+  v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING);
+  v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING);
+  v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING);
+  v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING);
+  v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING);
+  v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING);
+  v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING);
+  v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING);
+  v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING);
+  v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING);
+  v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING);
+  v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING);
+  v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING);
+  v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING);
+  v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING);
+  v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING);
+
+  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+  u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+  u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+  u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+  u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+  u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+  u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+  u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+  u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+  u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS);
+  u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS);
+  u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS);
+  u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS);
+  u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS);
+  u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS);
+  u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS);
+  u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS);
+  u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS);
+  u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS);
+  u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS);
+  u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS);
+  u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS);
+  u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS);
+  u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS);
+  u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS);
+
+  s[0] = _mm_packs_epi32(u[0], u[1]);
+  s[1] = _mm_packs_epi32(u[2], u[3]);
+  s[2] = _mm_packs_epi32(u[4], u[5]);
+  s[3] = _mm_packs_epi32(u[6], u[7]);
+  s[4] = _mm_packs_epi32(u[8], u[9]);
+  s[5] = _mm_packs_epi32(u[10], u[11]);
+  s[6] = _mm_packs_epi32(u[12], u[13]);
+  s[7] = _mm_packs_epi32(u[14], u[15]);
+  s[8] = _mm_packs_epi32(u[16], u[17]);
+  s[9] = _mm_packs_epi32(u[18], u[19]);
+  s[10] = _mm_packs_epi32(u[20], u[21]);
+  s[11] = _mm_packs_epi32(u[22], u[23]);
+  s[12] = _mm_packs_epi32(u[24], u[25]);
+  s[13] = _mm_packs_epi32(u[26], u[27]);
+  s[14] = _mm_packs_epi32(u[28], u[29]);
+  s[15] = _mm_packs_epi32(u[30], u[31]);
+
+  // stage 2
+  u[0] = _mm_unpacklo_epi16(s[8], s[9]);
+  u[1] = _mm_unpackhi_epi16(s[8], s[9]);
+  u[2] = _mm_unpacklo_epi16(s[10], s[11]);
+  u[3] = _mm_unpackhi_epi16(s[10], s[11]);
+  u[4] = _mm_unpacklo_epi16(s[12], s[13]);
+  u[5] = _mm_unpackhi_epi16(s[12], s[13]);
+  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+  u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
+  v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
+  v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
+  v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
+  v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
+  v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
+  v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
+  v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
+  v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
+  v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
+  v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
+  v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
+  v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);
+
+  u[0] = _mm_add_epi32(v[0], v[8]);
+  u[1] = _mm_add_epi32(v[1], v[9]);
+  u[2] = _mm_add_epi32(v[2], v[10]);
+  u[3] = _mm_add_epi32(v[3], v[11]);
+  u[4] = _mm_add_epi32(v[4], v[12]);
+  u[5] = _mm_add_epi32(v[5], v[13]);
+  u[6] = _mm_add_epi32(v[6], v[14]);
+  u[7] = _mm_add_epi32(v[7], v[15]);
+  u[8] = _mm_sub_epi32(v[0], v[8]);
+  u[9] = _mm_sub_epi32(v[1], v[9]);
+  u[10] = _mm_sub_epi32(v[2], v[10]);
+  u[11] = _mm_sub_epi32(v[3], v[11]);
+  u[12] = _mm_sub_epi32(v[4], v[12]);
+  u[13] = _mm_sub_epi32(v[5], v[13]);
+  u[14] = _mm_sub_epi32(v[6], v[14]);
+  u[15] = _mm_sub_epi32(v[7], v[15]);
+
+  v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+  v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+  v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+  v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+  v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+  v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+  v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+  v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+  v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+  v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+  v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+  v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+  v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+  u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
+  u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
+  u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
+  u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
+  u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
+  u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
+  u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
+  u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);
+  u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS);
+  u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS);
+  u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
+  u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
+  u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
+  u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
+  u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
+  u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
+
+  x[0] = _mm_add_epi16(s[0], s[4]);
+  x[1] = _mm_add_epi16(s[1], s[5]);
+  x[2] = _mm_add_epi16(s[2], s[6]);
+  x[3] = _mm_add_epi16(s[3], s[7]);
+  x[4] = _mm_sub_epi16(s[0], s[4]);
+  x[5] = _mm_sub_epi16(s[1], s[5]);
+  x[6] = _mm_sub_epi16(s[2], s[6]);
+  x[7] = _mm_sub_epi16(s[3], s[7]);
+  x[8] = _mm_packs_epi32(u[0], u[1]);
+  x[9] = _mm_packs_epi32(u[2], u[3]);
+  x[10] = _mm_packs_epi32(u[4], u[5]);
+  x[11] = _mm_packs_epi32(u[6], u[7]);
+  x[12] = _mm_packs_epi32(u[8], u[9]);
+  x[13] = _mm_packs_epi32(u[10], u[11]);
+  x[14] = _mm_packs_epi32(u[12], u[13]);
+  x[15] = _mm_packs_epi32(u[14], u[15]);
+
+  // stage 3
+  u[0] = _mm_unpacklo_epi16(x[4], x[5]);
+  u[1] = _mm_unpackhi_epi16(x[4], x[5]);
+  u[2] = _mm_unpacklo_epi16(x[6], x[7]);
+  u[3] = _mm_unpackhi_epi16(x[6], x[7]);
+  u[4] = _mm_unpacklo_epi16(x[12], x[13]);
+  u[5] = _mm_unpackhi_epi16(x[12], x[13]);
+  u[6] = _mm_unpacklo_epi16(x[14], x[15]);
+  u[7] = _mm_unpackhi_epi16(x[14], x[15]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
+  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
+  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
+  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
+  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
+  v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
+  v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
+  v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
+  v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
+  v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
+  v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
+  v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
+  v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);
+
+  u[0] = _mm_add_epi32(v[0], v[4]);
+  u[1] = _mm_add_epi32(v[1], v[5]);
+  u[2] = _mm_add_epi32(v[2], v[6]);
+  u[3] = _mm_add_epi32(v[3], v[7]);
+  u[4] = _mm_sub_epi32(v[0], v[4]);
+  u[5] = _mm_sub_epi32(v[1], v[5]);
+  u[6] = _mm_sub_epi32(v[2], v[6]);
+  u[7] = _mm_sub_epi32(v[3], v[7]);
+  u[8] = _mm_add_epi32(v[8], v[12]);
+  u[9] = _mm_add_epi32(v[9], v[13]);
+  u[10] = _mm_add_epi32(v[10], v[14]);
+  u[11] = _mm_add_epi32(v[11], v[15]);
+  u[12] = _mm_sub_epi32(v[8], v[12]);
+  u[13] = _mm_sub_epi32(v[9], v[13]);
+  u[14] = _mm_sub_epi32(v[10], v[14]);
+  u[15] = _mm_sub_epi32(v[11], v[15]);
+
+  u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);
+  u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING);
+  u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING);
+  u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
+  u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
+  u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
+  u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
+  u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
+  u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
+  s[0] = _mm_add_epi16(x[0], x[2]);
+  s[1] = _mm_add_epi16(x[1], x[3]);
+  s[2] = _mm_sub_epi16(x[0], x[2]);
+  s[3] = _mm_sub_epi16(x[1], x[3]);
+  s[4] = _mm_packs_epi32(v[0], v[1]);
+  s[5] = _mm_packs_epi32(v[2], v[3]);
+  s[6] = _mm_packs_epi32(v[4], v[5]);
+  s[7] = _mm_packs_epi32(v[6], v[7]);
+  s[8] = _mm_add_epi16(x[8], x[10]);
+  s[9] = _mm_add_epi16(x[9], x[11]);
+  s[10] = _mm_sub_epi16(x[8], x[10]);
+  s[11] = _mm_sub_epi16(x[9], x[11]);
+  s[12] = _mm_packs_epi32(v[8], v[9]);
+  s[13] = _mm_packs_epi32(v[10], v[11]);
+  s[14] = _mm_packs_epi32(v[12], v[13]);
+  s[15] = _mm_packs_epi32(v[14], v[15]);
+
+  // stage 4
+  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
+  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
+  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
+  u[3] = _mm_unpackhi_epi16(s[6], s[7]);
+  u[4] = _mm_unpacklo_epi16(s[10], s[11]);
+  u[5] = _mm_unpackhi_epi16(s[10], s[11]);
+  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
+  u[7] = _mm_unpackhi_epi16(s[14], s[15]);
+
+  v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16);
+  v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16);
+  v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16);
+  v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16);
+  v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16);
+  v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16);
+  v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16);
+  v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16);
+  v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16);
+  v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16);
+  v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16);
+  v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16);
+  v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16);
+  v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16);
+  v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16);
+  v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16);
+
+  u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING);
+  u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING);
+  u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING);
+  u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING);
+  u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING);
+  u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING);
+  u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING);
+  u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING);
+  u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING);
+  u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING);
+  u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING);
+  u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING);
+  u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING);
+  u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING);
+  u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING);
+  u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING);
+
+  v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
+  v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
+  v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
+  v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
+  v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
+  v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
+  v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
+  v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
+  v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS);
+  v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS);
+  v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS);
+  v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS);
+  v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS);
+  v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS);
+  v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
+  v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
+
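+  // Final reordering: the ADST flow graph delivers its results out of
+  // order and with mixed signs, so selected lanes are negated by
+  // subtracting from kZero while the remainder are copied or packed
+  // straight through.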
+  in[0] = s[0];
+  in[1] = _mm_sub_epi16(kZero, s[8]);
+  in[2] = s[12];
+  in[3] = _mm_sub_epi16(kZero, s[4]);
+  in[4] = _mm_packs_epi32(v[4], v[5]);
+  in[5] = _mm_packs_epi32(v[12], v[13]);
+  in[6] = _mm_packs_epi32(v[8], v[9]);
+  in[7] = _mm_packs_epi32(v[0], v[1]);
+  in[8] = _mm_packs_epi32(v[2], v[3]);
+  in[9] = _mm_packs_epi32(v[10], v[11]);
+  in[10] = _mm_packs_epi32(v[14], v[15]);
+  in[11] = _mm_packs_epi32(v[6], v[7]);
+  in[12] = s[5];
+  in[13] = _mm_sub_epi16(kZero, s[13]);
+  in[14] = s[9];
+  in[15] = _mm_sub_epi16(kZero, s[1]);
+}
+
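+// A 1-D 16-point pass runs independently on the two 8-column halves and
+// then transposes the 16x16 result, so the same column transform code is
+// reused for the row pass.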
+static void fdct16_sse2(__m128i *in0, __m128i *in1) {
+  fdct16_8col(in0);
+  fdct16_8col(in1);
+  array_transpose_16x16(in0, in1);
+}
+
+static void fadst16_sse2(__m128i *in0, __m128i *in1) {
+  fadst16_8col(in0);
+  fadst16_8col(in1);
+  array_transpose_16x16(in0, in1);
+}
+
+#if CONFIG_EXT_TX
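+// fidtx16: 16x16 identity transform for the ext-tx set. Each input is
+// doubled by the shifts below and then multiplied by the fixed-point
+// Sqrt2 constant (with the usual round and shift back), for an overall
+// 1-D scaling of 2 * sqrt(2).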
+static void fidtx16_8col(__m128i *in) {
+  const __m128i k__zero_epi16 = _mm_set1_epi16((int16_t)0);
+  const __m128i k__sqrt2_epi16 = _mm_set1_epi16((int16_t)Sqrt2);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+
+  __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+  __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+  __m128i x0, x1, x2, x3, x4, x5, x6, x7;
+  __m128i y0, y1, y2, y3, y4, y5, y6, y7;
+
+  in[0] = _mm_slli_epi16(in[0], 1);
+  in[1] = _mm_slli_epi16(in[1], 1);
+  in[2] = _mm_slli_epi16(in[2], 1);
+  in[3] = _mm_slli_epi16(in[3], 1);
+  in[4] = _mm_slli_epi16(in[4], 1);
+  in[5] = _mm_slli_epi16(in[5], 1);
+  in[6] = _mm_slli_epi16(in[6], 1);
+  in[7] = _mm_slli_epi16(in[7], 1);
+  in[8] = _mm_slli_epi16(in[8], 1);
+  in[9] = _mm_slli_epi16(in[9], 1);
+  in[10] = _mm_slli_epi16(in[10], 1);
+  in[11] = _mm_slli_epi16(in[11], 1);
+  in[12] = _mm_slli_epi16(in[12], 1);
+  in[13] = _mm_slli_epi16(in[13], 1);
+  in[14] = _mm_slli_epi16(in[14], 1);
+  in[15] = _mm_slli_epi16(in[15], 1);
+
+  v0 = _mm_unpacklo_epi16(in[0], k__zero_epi16);
+  v1 = _mm_unpacklo_epi16(in[1], k__zero_epi16);
+  v2 = _mm_unpacklo_epi16(in[2], k__zero_epi16);
+  v3 = _mm_unpacklo_epi16(in[3], k__zero_epi16);
+  v4 = _mm_unpacklo_epi16(in[4], k__zero_epi16);
+  v5 = _mm_unpacklo_epi16(in[5], k__zero_epi16);
+  v6 = _mm_unpacklo_epi16(in[6], k__zero_epi16);
+  v7 = _mm_unpacklo_epi16(in[7], k__zero_epi16);
+
+  u0 = _mm_unpacklo_epi16(in[8], k__zero_epi16);
+  u1 = _mm_unpacklo_epi16(in[9], k__zero_epi16);
+  u2 = _mm_unpacklo_epi16(in[10], k__zero_epi16);
+  u3 = _mm_unpacklo_epi16(in[11], k__zero_epi16);
+  u4 = _mm_unpacklo_epi16(in[12], k__zero_epi16);
+  u5 = _mm_unpacklo_epi16(in[13], k__zero_epi16);
+  u6 = _mm_unpacklo_epi16(in[14], k__zero_epi16);
+  u7 = _mm_unpacklo_epi16(in[15], k__zero_epi16);
+
+  x0 = _mm_unpackhi_epi16(in[0], k__zero_epi16);
+  x1 = _mm_unpackhi_epi16(in[1], k__zero_epi16);
+  x2 = _mm_unpackhi_epi16(in[2], k__zero_epi16);
+  x3 = _mm_unpackhi_epi16(in[3], k__zero_epi16);
+  x4 = _mm_unpackhi_epi16(in[4], k__zero_epi16);
+  x5 = _mm_unpackhi_epi16(in[5], k__zero_epi16);
+  x6 = _mm_unpackhi_epi16(in[6], k__zero_epi16);
+  x7 = _mm_unpackhi_epi16(in[7], k__zero_epi16);
+
+  y0 = _mm_unpackhi_epi16(in[8], k__zero_epi16);
+  y1 = _mm_unpackhi_epi16(in[9], k__zero_epi16);
+  y2 = _mm_unpackhi_epi16(in[10], k__zero_epi16);
+  y3 = _mm_unpackhi_epi16(in[11], k__zero_epi16);
+  y4 = _mm_unpackhi_epi16(in[12], k__zero_epi16);
+  y5 = _mm_unpackhi_epi16(in[13], k__zero_epi16);
+  y6 = _mm_unpackhi_epi16(in[14], k__zero_epi16);
+  y7 = _mm_unpackhi_epi16(in[15], k__zero_epi16);
+
+  v0 = _mm_madd_epi16(v0, k__sqrt2_epi16);
+  v1 = _mm_madd_epi16(v1, k__sqrt2_epi16);
+  v2 = _mm_madd_epi16(v2, k__sqrt2_epi16);
+  v3 = _mm_madd_epi16(v3, k__sqrt2_epi16);
+  v4 = _mm_madd_epi16(v4, k__sqrt2_epi16);
+  v5 = _mm_madd_epi16(v5, k__sqrt2_epi16);
+  v6 = _mm_madd_epi16(v6, k__sqrt2_epi16);
+  v7 = _mm_madd_epi16(v7, k__sqrt2_epi16);
+
+  x0 = _mm_madd_epi16(x0, k__sqrt2_epi16);
+  x1 = _mm_madd_epi16(x1, k__sqrt2_epi16);
+  x2 = _mm_madd_epi16(x2, k__sqrt2_epi16);
+  x3 = _mm_madd_epi16(x3, k__sqrt2_epi16);
+  x4 = _mm_madd_epi16(x4, k__sqrt2_epi16);
+  x5 = _mm_madd_epi16(x5, k__sqrt2_epi16);
+  x6 = _mm_madd_epi16(x6, k__sqrt2_epi16);
+  x7 = _mm_madd_epi16(x7, k__sqrt2_epi16);
+
+  u0 = _mm_madd_epi16(u0, k__sqrt2_epi16);
+  u1 = _mm_madd_epi16(u1, k__sqrt2_epi16);
+  u2 = _mm_madd_epi16(u2, k__sqrt2_epi16);
+  u3 = _mm_madd_epi16(u3, k__sqrt2_epi16);
+  u4 = _mm_madd_epi16(u4, k__sqrt2_epi16);
+  u5 = _mm_madd_epi16(u5, k__sqrt2_epi16);
+  u6 = _mm_madd_epi16(u6, k__sqrt2_epi16);
+  u7 = _mm_madd_epi16(u7, k__sqrt2_epi16);
+
+  y0 = _mm_madd_epi16(y0, k__sqrt2_epi16);
+  y1 = _mm_madd_epi16(y1, k__sqrt2_epi16);
+  y2 = _mm_madd_epi16(y2, k__sqrt2_epi16);
+  y3 = _mm_madd_epi16(y3, k__sqrt2_epi16);
+  y4 = _mm_madd_epi16(y4, k__sqrt2_epi16);
+  y5 = _mm_madd_epi16(y5, k__sqrt2_epi16);
+  y6 = _mm_madd_epi16(y6, k__sqrt2_epi16);
+  y7 = _mm_madd_epi16(y7, k__sqrt2_epi16);
+
+  v0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING);
+  v1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING);
+  v2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING);
+  v3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING);
+  v4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING);
+  v5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING);
+  v6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING);
+  v7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING);
+
+  x0 = _mm_add_epi32(x0, k__DCT_CONST_ROUNDING);
+  x1 = _mm_add_epi32(x1, k__DCT_CONST_ROUNDING);
+  x2 = _mm_add_epi32(x2, k__DCT_CONST_ROUNDING);
+  x3 = _mm_add_epi32(x3, k__DCT_CONST_ROUNDING);
+  x4 = _mm_add_epi32(x4, k__DCT_CONST_ROUNDING);
+  x5 = _mm_add_epi32(x5, k__DCT_CONST_ROUNDING);
+  x6 = _mm_add_epi32(x6, k__DCT_CONST_ROUNDING);
+  x7 = _mm_add_epi32(x7, k__DCT_CONST_ROUNDING);
+
+  u0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+  u1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+  u2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+  u3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+  u4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+  u5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+  u6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+  u7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+
+  y0 = _mm_add_epi32(y0, k__DCT_CONST_ROUNDING);
+  y1 = _mm_add_epi32(y1, k__DCT_CONST_ROUNDING);
+  y2 = _mm_add_epi32(y2, k__DCT_CONST_ROUNDING);
+  y3 = _mm_add_epi32(y3, k__DCT_CONST_ROUNDING);
+  y4 = _mm_add_epi32(y4, k__DCT_CONST_ROUNDING);
+  y5 = _mm_add_epi32(y5, k__DCT_CONST_ROUNDING);
+  y6 = _mm_add_epi32(y6, k__DCT_CONST_ROUNDING);
+  y7 = _mm_add_epi32(y7, k__DCT_CONST_ROUNDING);
+
+  v0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+  v1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+  v2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+  v3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+  v4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+  v5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+  v6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+  v7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+
+  x0 = _mm_srai_epi32(x0, DCT_CONST_BITS);
+  x1 = _mm_srai_epi32(x1, DCT_CONST_BITS);
+  x2 = _mm_srai_epi32(x2, DCT_CONST_BITS);
+  x3 = _mm_srai_epi32(x3, DCT_CONST_BITS);
+  x4 = _mm_srai_epi32(x4, DCT_CONST_BITS);
+  x5 = _mm_srai_epi32(x5, DCT_CONST_BITS);
+  x6 = _mm_srai_epi32(x6, DCT_CONST_BITS);
+  x7 = _mm_srai_epi32(x7, DCT_CONST_BITS);
+
+  u0 = _mm_srai_epi32(u0, DCT_CONST_BITS);
+  u1 = _mm_srai_epi32(u1, DCT_CONST_BITS);
+  u2 = _mm_srai_epi32(u2, DCT_CONST_BITS);
+  u3 = _mm_srai_epi32(u3, DCT_CONST_BITS);
+  u4 = _mm_srai_epi32(u4, DCT_CONST_BITS);
+  u5 = _mm_srai_epi32(u5, DCT_CONST_BITS);
+  u6 = _mm_srai_epi32(u6, DCT_CONST_BITS);
+  u7 = _mm_srai_epi32(u7, DCT_CONST_BITS);
+
+  y0 = _mm_srai_epi32(y0, DCT_CONST_BITS);
+  y1 = _mm_srai_epi32(y1, DCT_CONST_BITS);
+  y2 = _mm_srai_epi32(y2, DCT_CONST_BITS);
+  y3 = _mm_srai_epi32(y3, DCT_CONST_BITS);
+  y4 = _mm_srai_epi32(y4, DCT_CONST_BITS);
+  y5 = _mm_srai_epi32(y5, DCT_CONST_BITS);
+  y6 = _mm_srai_epi32(y6, DCT_CONST_BITS);
+  y7 = _mm_srai_epi32(y7, DCT_CONST_BITS);
+
+  in[0] = _mm_packs_epi32(v0, x0);
+  in[1] = _mm_packs_epi32(v1, x1);
+  in[2] = _mm_packs_epi32(v2, x2);
+  in[3] = _mm_packs_epi32(v3, x3);
+  in[4] = _mm_packs_epi32(v4, x4);
+  in[5] = _mm_packs_epi32(v5, x5);
+  in[6] = _mm_packs_epi32(v6, x6);
+  in[7] = _mm_packs_epi32(v7, x7);
+
+  in[8] = _mm_packs_epi32(u0, y0);
+  in[9] = _mm_packs_epi32(u1, y1);
+  in[10] = _mm_packs_epi32(u2, y2);
+  in[11] = _mm_packs_epi32(u3, y3);
+  in[12] = _mm_packs_epi32(u4, y4);
+  in[13] = _mm_packs_epi32(u5, y5);
+  in[14] = _mm_packs_epi32(u6, y6);
+  in[15] = _mm_packs_epi32(u7, y7);
+}
+
+static void fidtx16_sse2(__m128i *in0, __m128i *in1) {
+  fidtx16_8col(in0);
+  fidtx16_8col(in1);
+  array_transpose_16x16(in0, in1);
+}
+#endif  // CONFIG_EXT_TX
+
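+// Hybrid 16x16 forward transform. As in the 8x8 version, the tx_type
+// name gives the vertical (column) transform first; the flip flags passed
+// to load_buffer_16x16 realize the FLIPADST variants by mirroring the
+// input up/down or left/right before transforming.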
+void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+                        int tx_type) {
+  __m128i in0[16], in1[16];
+
+  switch (tx_type) {
+    case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
+    case ADST_DCT:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fdct16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case DCT_ADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fdct16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case ADST_ADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      load_buffer_16x16(input, in0, in1, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fdct16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 1);
+      fdct16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_16x16(input, in0, in1, stride, 1, 1);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 1);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_16x16(input, in0, in1, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case V_DCT:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fdct16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fidtx16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case H_DCT:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fidtx16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fdct16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case V_ADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fidtx16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case H_ADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 0);
+      fidtx16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case V_FLIPADST:
+      load_buffer_16x16(input, in0, in1, stride, 1, 0);
+      fadst16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fidtx16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+    case H_FLIPADST:
+      load_buffer_16x16(input, in0, in1, stride, 0, 1);
+      fidtx16_sse2(in0, in1);
+      right_shift_16x16(in0, in1);
+      fadst16_sse2(in0, in1);
+      write_buffer_16x16(output, in0, in1, 16);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0); break;
+  }
+}
diff --git a/av1/encoder/x86/dct_sse2.asm b/av1/encoder/x86/dct_sse2.asm
new file mode 100644
index 0000000..c3a5fb5
--- /dev/null
+++ b/av1/encoder/x86/dct_sse2.asm
@@ -0,0 +1,86 @@
+;
+;  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%define private_prefix vp10
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
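+; TRANSFORM_COLS applies the lifting-based 4-point Walsh-Hadamard
+; butterfly to the four rows held in m0..m3 (a1, b1, c1, d1); the
+; trailing SWAPs return the results to m0..m3 in output order.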
+%macro TRANSFORM_COLS 0
+  paddw           m0,        m1
+  movq            m4,        m0
+  psubw           m3,        m2
+  psubw           m4,        m3
+  psraw           m4,        1
+  movq            m5,        m4
+  psubw           m5,        m1 ;b1
+  psubw           m4,        m2 ;c1
+  psubw           m0,        m4
+  paddw           m3,        m5
+                                ; m0 a0
+  SWAP            1,         4  ; m1 c1
+  SWAP            2,         3  ; m2 d1
+  SWAP            3,         5  ; m3 b1
+%endmacro
+
+%macro TRANSPOSE_4X4 0
+                                ; 00 01 02 03
+                                ; 10 11 12 13
+                                ; 20 21 22 23
+                                ; 30 31 32 33
+  punpcklwd       m0,        m1 ; 00 10 01 11  02 12 03 13
+  punpcklwd       m2,        m3 ; 20 30 21 31  22 32 23 33
+  mova            m1,        m0
+  punpckldq       m0,        m2 ; 00 10 20 30  01 11 21 31
+  punpckhdq       m1,        m2 ; 02 12 22 32  03 13 23 33
+%endmacro
+
+INIT_XMM sse2
+cglobal fwht4x4, 3, 4, 8, input, output, stride
+  lea             r3q,       [inputq + strideq*4]
+  movq            m0,        [inputq] ;a1
+  movq            m1,        [inputq + strideq*2] ;b1
+  movq            m2,        [r3q] ;c1
+  movq            m3,        [r3q + strideq*2] ;d1
+
+  TRANSFORM_COLS
+  TRANSPOSE_4X4
+  SWAP            1,         2
+  psrldq          m1,        m0, 8
+  psrldq          m3,        m2, 8
+  TRANSFORM_COLS
+  TRANSPOSE_4X4
+
+  psllw           m0,        2
+  psllw           m1,        2
+
+%if CONFIG_VP9_HIGHBITDEPTH
+  ; sign extension
+  mova            m2,             m0
+  mova            m3,             m1
+  punpcklwd       m0,             m0
+  punpcklwd       m1,             m1
+  punpckhwd       m2,             m2
+  punpckhwd       m3,             m3
+  psrad           m0,             16
+  psrad           m1,             16
+  psrad           m2,             16
+  psrad           m3,             16
+  mova            [outputq],      m0
+  mova            [outputq + 16], m2
+  mova            [outputq + 32], m1
+  mova            [outputq + 48], m3
+%else
+  mova            [outputq],      m0
+  mova            [outputq + 16], m1
+%endif
+
+  RET
diff --git a/av1/encoder/x86/dct_ssse3.c b/av1/encoder/x86/dct_ssse3.c
new file mode 100644
index 0000000..aa018a1
--- /dev/null
+++ b/av1/encoder/x86/dct_ssse3.c
@@ -0,0 +1,468 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#if defined(_MSC_VER) && _MSC_VER <= 1500
+// Need to include math.h before including tmmintrin.h/intrin.h
+// in certain versions of MSVS.
+#include <math.h>
+#endif
+#include <tmmintrin.h>  // SSSE3
+
+#include "./vp10_rtcd.h"
+#include "aom_dsp/x86/inv_txfm_sse2.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+
+void vp10_fdct8x8_quant_ssse3(
+    const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+    int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+    const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+    uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  __m128i zero;
+  int pass;
+  // Constants
+  //    In one case all lanes hold the same value; in all others a pair of
+  //    values is repeated four times, done by constructing the 32-bit
+  //    constant corresponding to that pair.
+  const __m128i k__dual_p16_p16 = dual_set_epi16(23170, 23170);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
+  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
+  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);
+  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
+  // Load input
+  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  __m128i *in[8];
+  int index = 0;
+
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)coeff_ptr;
+
+  // Pre-condition input (shift by two)
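+  // (multiply by 4 to keep intermediate precision; the divide-by-two
+  // post-condition near the end of the function scales back down)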
+  in0 = _mm_slli_epi16(in0, 2);
+  in1 = _mm_slli_epi16(in1, 2);
+  in2 = _mm_slli_epi16(in2, 2);
+  in3 = _mm_slli_epi16(in3, 2);
+  in4 = _mm_slli_epi16(in4, 2);
+  in5 = _mm_slli_epi16(in5, 2);
+  in6 = _mm_slli_epi16(in6, 2);
+  in7 = _mm_slli_epi16(in7, 2);
+
+  in[0] = &in0;
+  in[1] = &in1;
+  in[2] = &in2;
+  in[3] = &in3;
+  in[4] = &in4;
+  in[5] = &in5;
+  in[6] = &in6;
+  in[7] = &in7;
+
+  // We do two passes, first the columns, then the rows. The results of the
+  // first pass are transposed so that the same column code can be reused. The
+  // results of the second pass are also transposed so that the rows (processed
+  // as columns) are put back in row positions.
+  for (pass = 0; pass < 2; pass++) {
+    // To store results of each pass before the transpose.
+    __m128i res0, res1, res2, res3, res4, res5, res6, res7;
+    // Add/subtract
+    const __m128i q0 = _mm_add_epi16(in0, in7);
+    const __m128i q1 = _mm_add_epi16(in1, in6);
+    const __m128i q2 = _mm_add_epi16(in2, in5);
+    const __m128i q3 = _mm_add_epi16(in3, in4);
+    const __m128i q4 = _mm_sub_epi16(in3, in4);
+    const __m128i q5 = _mm_sub_epi16(in2, in5);
+    const __m128i q6 = _mm_sub_epi16(in1, in6);
+    const __m128i q7 = _mm_sub_epi16(in0, in7);
+    // Work on first four results
+    {
+      // Add/subtract
+      const __m128i r0 = _mm_add_epi16(q0, q3);
+      const __m128i r1 = _mm_add_epi16(q1, q2);
+      const __m128i r2 = _mm_sub_epi16(q1, q2);
+      const __m128i r3 = _mm_sub_epi16(q0, q3);
+      // Interleave to do the multiply by constants which gets us into 32bits
+      const __m128i t0 = _mm_unpacklo_epi16(r0, r1);
+      const __m128i t1 = _mm_unpackhi_epi16(r0, r1);
+      const __m128i t2 = _mm_unpacklo_epi16(r2, r3);
+      const __m128i t3 = _mm_unpackhi_epi16(r2, r3);
+
+      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16);
+      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16);
+      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16);
+      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16);
+
+      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08);
+      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08);
+      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24);
+      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24);
+      // dct_const_round_shift
+
+      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+
+      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+
+      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+
+      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+      // Combine
+
+      res0 = _mm_packs_epi32(w0, w1);
+      res4 = _mm_packs_epi32(w2, w3);
+      res2 = _mm_packs_epi32(w4, w5);
+      res6 = _mm_packs_epi32(w6, w7);
+    }
+    // Work on next four results
+    {
+      // Interleave to do the multiply by constants which gets us into 32bits
+      const __m128i d0 = _mm_sub_epi16(q6, q5);
+      const __m128i d1 = _mm_add_epi16(q6, q5);
+      const __m128i r0 = _mm_mulhrs_epi16(d0, k__dual_p16_p16);
+      const __m128i r1 = _mm_mulhrs_epi16(d1, k__dual_p16_p16);
+
+      // Add/subtract
+      const __m128i x0 = _mm_add_epi16(q4, r0);
+      const __m128i x1 = _mm_sub_epi16(q4, r0);
+      const __m128i x2 = _mm_sub_epi16(q7, r1);
+      const __m128i x3 = _mm_add_epi16(q7, r1);
+      // Interleave to do the multiply by constants which gets us into 32bits
+      const __m128i t0 = _mm_unpacklo_epi16(x0, x3);
+      const __m128i t1 = _mm_unpackhi_epi16(x0, x3);
+      const __m128i t2 = _mm_unpacklo_epi16(x1, x2);
+      const __m128i t3 = _mm_unpackhi_epi16(x1, x2);
+      const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04);
+      const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04);
+      const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28);
+      const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28);
+      const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20);
+      const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20);
+      const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12);
+      const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12);
+      // dct_const_round_shift
+      const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING);
+      const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING);
+      const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING);
+      const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING);
+      const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING);
+      const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING);
+      const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING);
+      const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING);
+      const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS);
+      const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS);
+      const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS);
+      const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS);
+      const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS);
+      const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS);
+      const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS);
+      const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS);
+      // Combine
+      res1 = _mm_packs_epi32(w0, w1);
+      res7 = _mm_packs_epi32(w2, w3);
+      res5 = _mm_packs_epi32(w4, w5);
+      res3 = _mm_packs_epi32(w6, w7);
+    }
+    // Transpose the 8x8.
+    {
+      // 00 01 02 03 04 05 06 07
+      // 10 11 12 13 14 15 16 17
+      // 20 21 22 23 24 25 26 27
+      // 30 31 32 33 34 35 36 37
+      // 40 41 42 43 44 45 46 47
+      // 50 51 52 53 54 55 56 57
+      // 60 61 62 63 64 65 66 67
+      // 70 71 72 73 74 75 76 77
+      const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1);
+      const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3);
+      const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1);
+      const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3);
+      const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5);
+      const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7);
+      const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5);
+      const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7);
+      // 00 10 01 11 02 12 03 13
+      // 20 30 21 31 22 32 23 33
+      // 04 14 05 15 06 16 07 17
+      // 24 34 25 35 26 36 27 37
+      // 40 50 41 51 42 52 43 53
+      // 60 70 61 71 62 72 63 73
+      // 44 54 45 55 46 56 47 57
+      // 64 74 65 75 66 76 67 77
+      const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+      const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+      const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+      const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+      const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+      const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+      const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+      const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+      // 00 10 20 30 01 11 21 31
+      // 40 50 60 70 41 51 61 71
+      // 02 12 22 32 03 13 23 33
+      // 42 52 62 72 43 53 63 73
+      // 04 14 24 34 05 15 25 35
+      // 44 54 64 74 45 55 65 75
+      // 06 16 26 36 07 17 27 37
+      // 46 56 66 76 47 57 67 77
+      in0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
+      in1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
+      in2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
+      in3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
+      in4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
+      in5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
+      in6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
+      in7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
+      // 00 10 20 30 40 50 60 70
+      // 01 11 21 31 41 51 61 71
+      // 02 12 22 32 42 52 62 72
+      // 03 13 23 33 43 53 63 73
+      // 04 14 24 34 44 54 64 74
+      // 05 15 25 35 45 55 65 75
+      // 06 16 26 36 46 56 66 76
+      // 07 17 27 37 47 57 67 77
+    }
+  }
+  // Post-condition output and store it
+  {
+    // Post-condition (division by two)
+    //    division of two 16 bits signed numbers using shifts
+    //    n / 2 = (n - (n >> 15)) >> 1
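+    //    e.g. n = -5: (-5 - (-1)) >> 1 = -2, matching truncation toward
+    //    zero, where a plain arithmetic shift would give -3.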
+    const __m128i sign_in0 = _mm_srai_epi16(in0, 15);
+    const __m128i sign_in1 = _mm_srai_epi16(in1, 15);
+    const __m128i sign_in2 = _mm_srai_epi16(in2, 15);
+    const __m128i sign_in3 = _mm_srai_epi16(in3, 15);
+    const __m128i sign_in4 = _mm_srai_epi16(in4, 15);
+    const __m128i sign_in5 = _mm_srai_epi16(in5, 15);
+    const __m128i sign_in6 = _mm_srai_epi16(in6, 15);
+    const __m128i sign_in7 = _mm_srai_epi16(in7, 15);
+    in0 = _mm_sub_epi16(in0, sign_in0);
+    in1 = _mm_sub_epi16(in1, sign_in1);
+    in2 = _mm_sub_epi16(in2, sign_in2);
+    in3 = _mm_sub_epi16(in3, sign_in3);
+    in4 = _mm_sub_epi16(in4, sign_in4);
+    in5 = _mm_sub_epi16(in5, sign_in5);
+    in6 = _mm_sub_epi16(in6, sign_in6);
+    in7 = _mm_sub_epi16(in7, sign_in7);
+    in0 = _mm_srai_epi16(in0, 1);
+    in1 = _mm_srai_epi16(in1, 1);
+    in2 = _mm_srai_epi16(in2, 1);
+    in3 = _mm_srai_epi16(in3, 1);
+    in4 = _mm_srai_epi16(in4, 1);
+    in5 = _mm_srai_epi16(in5, 1);
+    in6 = _mm_srai_epi16(in6, 1);
+    in7 = _mm_srai_epi16(in7, 1);
+  }
+
+  iscan_ptr += n_coeffs;
+  qcoeff_ptr += n_coeffs;
+  dqcoeff_ptr += n_coeffs;
+  n_coeffs = -n_coeffs;
+  zero = _mm_setzero_si128();
+
+  if (!skip_block) {
+    __m128i eob;
+    __m128i round, quant, dequant, thr;
+    int16_t nzflag;
+    {
+      __m128i coeff0, coeff1;
+
+      // Setup global values
+      {
+        round = _mm_load_si128((const __m128i *)round_ptr);
+        quant = _mm_load_si128((const __m128i *)quant_ptr);
+        dequant = _mm_load_si128((const __m128i *)dequant_ptr);
+      }
+
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+        // Do DC and first 15 AC
+        coeff0 = *in[0];
+        coeff1 = *in[1];
+
+        // Poor man's sign extract
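+        // (two's complement identity: |x| = (x ^ s) - s, with s = x >> 15)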
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+        round = _mm_unpackhi_epi64(round, round);
+        qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+        qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+        quant = _mm_unpackhi_epi64(quant, quant);
+        qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+        // Reinsert signs
+        qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+        coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+        dequant = _mm_unpackhi_epi64(dequant, dequant);
+        coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+      }
+
+      {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
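+        // (nzero_coeff* lanes are all-ones (-1) where the coeff is nonzero,
+        // so the subtract below adds one in exactly those lanes)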
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob = _mm_max_epi16(eob, eob1);
+      }
+      n_coeffs += 8 * 2;
+    }
+
+    // AC only loop
+    index = 2;
+    thr = _mm_srai_epi16(dequant, 1);
+    while (n_coeffs < 0) {
+      __m128i coeff0, coeff1;
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+
+        assert(index < (int)(sizeof(in) / sizeof(in[0])) - 1);
+        coeff0 = *in[index];
+        coeff1 = *in[index + 1];
+
+        // Poor man's sign extract
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
+                 _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+
+        if (nzflag) {
+          qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+          qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+          qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+          qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+          // Reinsert signs
+          qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+          qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+          qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+          qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+          coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+          coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+        } else {
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
+
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+        }
+      }
+
+      if (nzflag) {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob0, eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob0 = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob0 = _mm_max_epi16(eob0, eob1);
+        eob = _mm_max_epi16(eob, eob0);
+      }
+      n_coeffs += 8 * 2;
+      index += 2;
+    }
+
+    // Accumulate EOB
+    {
+      __m128i eob_shuffled;
+      eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      *eob_ptr = _mm_extract_epi16(eob, 1);
+    }
+  } else {
+    do {
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
+      n_coeffs += 8 * 2;
+    } while (n_coeffs < 0);
+    *eob_ptr = 0;
+  }
+}
diff --git a/av1/encoder/x86/error_intrin_avx2.c b/av1/encoder/x86/error_intrin_avx2.c
new file mode 100644
index 0000000..6e7c093
--- /dev/null
+++ b/av1/encoder/x86/error_intrin_avx2.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <immintrin.h>  // AVX2
+
+#include "./vp10_rtcd.h"
+#include "aom/vpx_integer.h"
+
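+// For reference, a scalar sketch of what this kernel computes (not built):
+//   int64_t sse = 0, sqc = 0;
+//   for (i = 0; i < block_size; ++i) {
+//     const int d = dqcoeff[i] - coeff[i];
+//     sse += (int64_t)d * d;
+//     sqc += (int64_t)coeff[i] * (int64_t)coeff[i];
+//   }
+//   *ssz = sqc;
+//   return sse;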
+int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+                              intptr_t block_size, int64_t *ssz) {
+  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
+  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
+  __m256i sse_reg_64hi, ssz_reg_64hi;
+  __m128i sse_reg128, ssz_reg128;
+  int64_t sse;
+  int i;
+  const __m256i zero_reg = _mm256_set1_epi16(0);
+
+  // init sse and ssz registers to zero
+  sse_reg = _mm256_set1_epi16(0);
+  ssz_reg = _mm256_set1_epi16(0);
+
+  for (i = 0; i < block_size; i += 16) {
+    // load 32 bytes from coeff and dqcoeff
+    coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
+    dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
+    // dqcoeff - coeff
+    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
+    // madd (dqcoeff - coeff)
+    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
+    // madd coeff
+    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
+    // expand each double word of madd (dqcoeff - coeff) to quad word
+    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
+    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
+    // expand each double word of madd (coeff) to quad word
+    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
+    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
+    // add each quad word of madd (dqcoeff - coeff) and madd (coeff)
+    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
+    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
+    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
+    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
+  }
+  // save the higher 64 bit of each 128 bit lane
+  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
+  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
+  // add the higher 64 bit to the low 64 bit
+  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
+  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);
+
+  // add the 64-bit sums from the two 128-bit lanes of the 256-bit register
+  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
+                             _mm256_extractf128_si256(sse_reg, 1));
+
+  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
+                             _mm256_extractf128_si256(ssz_reg, 1));
+
+  // store the results
+  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);
+
+  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
+  return sse;
+}
diff --git a/av1/encoder/x86/error_sse2.asm b/av1/encoder/x86/error_sse2.asm
new file mode 100644
index 0000000..0772da4
--- /dev/null
+++ b/av1/encoder/x86/error_sse2.asm
@@ -0,0 +1,122 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%define private_prefix vp10
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+;                         int64_t *ssz)
+
+INIT_XMM sse2
+cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
+  pxor      m4, m4                 ; sse accumulator
+  pxor      m6, m6                 ; ssz accumulator
+  pxor      m5, m5                 ; dedicated zero register
+  lea     uqcq, [uqcq+sizeq*2]
+  lea     dqcq, [dqcq+sizeq*2]
+  neg    sizeq
+.loop:
+  mova      m2, [uqcq+sizeq*2]
+  mova      m0, [dqcq+sizeq*2]
+  mova      m3, [uqcq+sizeq*2+mmsize]
+  mova      m1, [dqcq+sizeq*2+mmsize]
+  psubw     m0, m2
+  psubw     m1, m3
+  ; individual errors are max. 15bit+sign, so squares are 30bit, and
+  ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
+  pmaddwd   m0, m0
+  pmaddwd   m1, m1
+  pmaddwd   m2, m2
+  pmaddwd   m3, m3
+  ; accumulate in 64bit
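+  ; zero-extend each 32-bit dword to 64 bits by interleaving with m5 (zero),
+  ; then paddq; the dword sums are non-negative, so zero-extension is safe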
+  punpckldq m7, m0, m5
+  punpckhdq m0, m5
+  paddq     m4, m7
+  punpckldq m7, m1, m5
+  paddq     m4, m0
+  punpckhdq m1, m5
+  paddq     m4, m7
+  punpckldq m7, m2, m5
+  paddq     m4, m1
+  punpckhdq m2, m5
+  paddq     m6, m7
+  punpckldq m7, m3, m5
+  paddq     m6, m2
+  punpckhdq m3, m5
+  paddq     m6, m7
+  paddq     m6, m3
+  add    sizeq, mmsize
+  jl .loop
+
+  ; accumulate horizontally and store in return value
+  movhlps   m5, m4
+  movhlps   m7, m6
+  paddq     m4, m5
+  paddq     m6, m7
+%if ARCH_X86_64
+  movq    rax, m4
+  movq [sszq], m6
+%else
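+  ; 32-bit: return the 64-bit sum in edx:eax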
+  mov     eax, sszm
+  pshufd   m5, m4, 0x1
+  movq  [eax], m6
+  movd    eax, m4
+  movd    edx, m5
+%endif
+  RET
+
+; Compute the sum of squared differences between two int16_t vectors.
+; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+;                            intptr_t block_size)
+
+INIT_XMM sse2
+cglobal block_error_fp, 3, 3, 6, uqc, dqc, size
+  pxor      m4, m4                 ; sse accumulator
+  pxor      m5, m5                 ; dedicated zero register
+  lea     uqcq, [uqcq+sizeq*2]
+  lea     dqcq, [dqcq+sizeq*2]
+  neg    sizeq
+.loop:
+  mova      m2, [uqcq+sizeq*2]
+  mova      m0, [dqcq+sizeq*2]
+  mova      m3, [uqcq+sizeq*2+mmsize]
+  mova      m1, [dqcq+sizeq*2+mmsize]
+  psubw     m0, m2
+  psubw     m1, m3
+  ; individual errors are max. 15bit+sign, so squares are 30bit, and
+  ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
+  pmaddwd   m0, m0
+  pmaddwd   m1, m1
+  ; accumulate in 64bit
+  punpckldq m3, m0, m5
+  punpckhdq m0, m5
+  paddq     m4, m3
+  punpckldq m3, m1, m5
+  paddq     m4, m0
+  punpckhdq m1, m5
+  paddq     m4, m3
+  paddq     m4, m1
+  add    sizeq, mmsize
+  jl .loop
+
+  ; accumulate horizontally and store in return value
+  movhlps   m5, m4
+  paddq     m4, m5
+%if ARCH_X86_64
+  movq    rax, m4
+%else
+  pshufd   m5, m4, 0x1
+  movd    eax, m4
+  movd    edx, m5
+%endif
+  RET
diff --git a/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
new file mode 100644
index 0000000..2728880
--- /dev/null
+++ b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -0,0 +1,71 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <stdio.h>
+
+#include "av1/common/common.h"
+
+int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
+                                     intptr_t block_size, int64_t *ssz,
+                                     int bps) {
+  int i, j, test;
+  uint32_t temp[4];
+  __m128i max, min, cmp0, cmp1, cmp2, cmp3;
+  int64_t error = 0, sqcoeff = 0;
+  const int shift = 2 * (bps - 8);
+  const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
+
+  for (i = 0; i < block_size; i += 8) {
+    // Load the data into xmm registers
+    __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i));
+    __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4));
+    __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i));
+    __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4));
+    // Check if any values require more than 15 bit
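+    // (coeffs in [-0x4000, 0x3fff] survive _mm_packs_epi32 unchanged and
+    // keep the 16-bit diff/madd path below overflow-free; anything wider
+    // takes the scalar fallback)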
+    max = _mm_set1_epi32(0x3fff);
+    min = _mm_set1_epi32(0xffffc000);
+    cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),
+                         _mm_cmplt_epi32(mm_coeff, min));
+    cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),
+                         _mm_cmplt_epi32(mm_coeff2, min));
+    cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),
+                         _mm_cmplt_epi32(mm_dqcoeff, min));
+    cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),
+                         _mm_cmplt_epi32(mm_dqcoeff2, min));
+    test = _mm_movemask_epi8(
+        _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)));
+
+    if (!test) {
+      __m128i mm_diff, error_sse2, sqcoeff_sse2;
+      mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2);
+      mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2);
+      mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff);
+      error_sse2 = _mm_madd_epi16(mm_diff, mm_diff);
+      sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff);
+      _mm_storeu_si128((__m128i *)temp, error_sse2);
+      error = error + temp[0] + temp[1] + temp[2] + temp[3];
+      _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2);
+      sqcoeff += temp[0] + temp[1] + temp[2] + temp[3];
+    } else {
+      for (j = 0; j < 8; j++) {
+        const int64_t diff = coeff[i + j] - dqcoeff[i + j];
+        error += diff * diff;
+        sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j];
+      }
+    }
+  }
+  assert(error >= 0 && sqcoeff >= 0);
+  error = (error + rounding) >> shift;
+  sqcoeff = (sqcoeff + rounding) >> shift;
+
+  *ssz = sqcoeff;
+  return error;
+}
diff --git a/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
new file mode 100644
index 0000000..a6cb454
--- /dev/null
+++ b/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -0,0 +1,1895 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <smmintrin.h> /* SSE4.1 */
+
+#include "./vp10_rtcd.h"
+#include "./vpx_config.h"
+#include "av1/common/vp10_fwd_txfm2d_cfg.h"
+#include "av1/common/vp10_txfm.h"
+#include "av1/common/x86/highbd_txfm_utility_sse4.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+#include "aom_ports/mem.h"
+
+static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr,
+                                   int shift) {
+  if (!flipud) {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+  } else {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
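+    // 0x1b = (0, 1, 2, 3) -> (3, 2, 1, 0): reverse the four 16-bit words.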
+    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+  }
+
+  in[0] = _mm_cvtepi16_epi32(in[0]);
+  in[1] = _mm_cvtepi16_epi32(in[1]);
+  in[2] = _mm_cvtepi16_epi32(in[2]);
+  in[3] = _mm_cvtepi16_epi32(in[3]);
+
+  in[0] = _mm_slli_epi32(in[0], shift);
+  in[1] = _mm_slli_epi32(in[1], shift);
+  in[2] = _mm_slli_epi32(in[2], shift);
+  in[3] = _mm_slli_epi32(in[3], shift);
+}
+
+// Note: the kernels below use only the stage-2 cos bit of each cfg;
+// shift[0] is used in load_buffer_4x4()
+// shift[1] is used in txfm_func_col()
+// shift[2] is used in txfm_func_row()
+static void fdct4x4_sse4_1(__m128i *in, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  __m128i s0, s1, s2, s3;
+  __m128i u0, u1, u2, u3;
+  __m128i v0, v1, v2, v3;
+
+  s0 = _mm_add_epi32(in[0], in[3]);
+  s1 = _mm_add_epi32(in[1], in[2]);
+  s2 = _mm_sub_epi32(in[1], in[2]);
+  s3 = _mm_sub_epi32(in[0], in[3]);
+
+  // btf_32_sse4_1_type0(cospi32, cospi32, s[01], u[02], bit);
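+  // i.e. u0 = (c32*s0 + c32*s1 + rnd) >> bit
+  //      u2 = (c32*s0 - c32*s1 + rnd) >> bit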
+  u0 = _mm_mullo_epi32(s0, cospi32);
+  u1 = _mm_mullo_epi32(s1, cospi32);
+  u2 = _mm_add_epi32(u0, u1);
+  v0 = _mm_sub_epi32(u0, u1);
+
+  u3 = _mm_add_epi32(u2, rnding);
+  v1 = _mm_add_epi32(v0, rnding);
+
+  u0 = _mm_srai_epi32(u3, bit);
+  u2 = _mm_srai_epi32(v1, bit);
+
+  // btf_32_sse4_1_type1(cospi48, cospi16, s[23], u[13], bit);
+  v0 = _mm_mullo_epi32(s2, cospi48);
+  v1 = _mm_mullo_epi32(s3, cospi16);
+  v2 = _mm_add_epi32(v0, v1);
+
+  v3 = _mm_add_epi32(v2, rnding);
+  u1 = _mm_srai_epi32(v3, bit);
+
+  v0 = _mm_mullo_epi32(s2, cospi16);
+  v1 = _mm_mullo_epi32(s3, cospi48);
+  v2 = _mm_sub_epi32(v1, v0);
+
+  v3 = _mm_add_epi32(v2, rnding);
+  u3 = _mm_srai_epi32(v3, bit);
+
+  // Note: shift[1] and shift[2] are zeros
+
+  // Transpose 4x4 32-bit
+  v0 = _mm_unpacklo_epi32(u0, u1);
+  v1 = _mm_unpackhi_epi32(u0, u1);
+  v2 = _mm_unpacklo_epi32(u2, u3);
+  v3 = _mm_unpackhi_epi32(u2, u3);
+
+  in[0] = _mm_unpacklo_epi64(v0, v2);
+  in[1] = _mm_unpackhi_epi64(v0, v2);
+  in[2] = _mm_unpacklo_epi64(v1, v3);
+  in[3] = _mm_unpackhi_epi64(v1, v3);
+}
+
+static INLINE void write_buffer_4x4(__m128i *res, tran_low_t *output) {
+  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
+  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
+  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
+  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
+}
+
+// Note:
+//  We implement vp10_fwd_txfm2d_4x4(). This stub is kept here only because
+//  vp10_highbd_fht4x4_c() has not been removed yet.
+void vp10_highbd_fht4x4_sse4_1(const int16_t *input, tran_low_t *output,
+                               int stride, int tx_type) {
+  (void)input;
+  (void)output;
+  (void)stride;
+  (void)tx_type;
+  assert(0);
+}
+
+static void fadst4x4_sse4_1(__m128i *in, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  const __m128i kZero = _mm_setzero_si128();
+  __m128i s0, s1, s2, s3;
+  __m128i u0, u1, u2, u3;
+  __m128i v0, v1, v2, v3;
+
+  // stage 0
+  // stage 1
+  // stage 2
+  u0 = _mm_mullo_epi32(in[3], cospi8);
+  u1 = _mm_mullo_epi32(in[0], cospi56);
+  u2 = _mm_add_epi32(u0, u1);
+  s0 = _mm_add_epi32(u2, rnding);
+  s0 = _mm_srai_epi32(s0, bit);
+
+  v0 = _mm_mullo_epi32(in[3], cospi56);
+  v1 = _mm_mullo_epi32(in[0], cospi8);
+  v2 = _mm_sub_epi32(v0, v1);
+  s1 = _mm_add_epi32(v2, rnding);
+  s1 = _mm_srai_epi32(s1, bit);
+
+  u0 = _mm_mullo_epi32(in[1], cospi40);
+  u1 = _mm_mullo_epi32(in[2], cospi24);
+  u2 = _mm_add_epi32(u0, u1);
+  s2 = _mm_add_epi32(u2, rnding);
+  s2 = _mm_srai_epi32(s2, bit);
+
+  v0 = _mm_mullo_epi32(in[1], cospi24);
+  v1 = _mm_mullo_epi32(in[2], cospi40);
+  v2 = _mm_sub_epi32(v0, v1);
+  s3 = _mm_add_epi32(v2, rnding);
+  s3 = _mm_srai_epi32(s3, bit);
+
+  // stage 3
+  u0 = _mm_add_epi32(s0, s2);
+  u2 = _mm_sub_epi32(s0, s2);
+  u1 = _mm_add_epi32(s1, s3);
+  u3 = _mm_sub_epi32(s1, s3);
+
+  // stage 4
+  v0 = _mm_mullo_epi32(u2, cospi32);
+  v1 = _mm_mullo_epi32(u3, cospi32);
+  v2 = _mm_add_epi32(v0, v1);
+  s2 = _mm_add_epi32(v2, rnding);
+  u2 = _mm_srai_epi32(s2, bit);
+
+  v2 = _mm_sub_epi32(v0, v1);
+  s3 = _mm_add_epi32(v2, rnding);
+  u3 = _mm_srai_epi32(s3, bit);
+
+  // Negate u2 and u1; the transpose below then writes the rows back in the
+  // order u0, u2, u3, u1.
+  u2 = _mm_sub_epi32(kZero, u2);
+  u1 = _mm_sub_epi32(kZero, u1);
+
+  // Transpose 4x4 32-bit
+  v0 = _mm_unpacklo_epi32(u0, u2);
+  v1 = _mm_unpackhi_epi32(u0, u2);
+  v2 = _mm_unpacklo_epi32(u3, u1);
+  v3 = _mm_unpackhi_epi32(u3, u1);
+
+  in[0] = _mm_unpacklo_epi64(v0, v2);
+  in[1] = _mm_unpackhi_epi64(v0, v2);
+  in[2] = _mm_unpacklo_epi64(v1, v3);
+  in[3] = _mm_unpackhi_epi64(v1, v3);
+}
+
+void vp10_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
+                                int input_stride, int tx_type, int bd) {
+  __m128i in[4];
+  const TXFM_2D_CFG *cfg = NULL;
+
+  switch (tx_type) {
+    case DCT_DCT:
+      cfg = &fwd_txfm_2d_cfg_dct_dct_4;
+      load_buffer_4x4(input, in, input_stride, 0, 0, cfg->shift[0]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_4;
+      load_buffer_4x4(input, in, input_stride, 0, 0, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case DCT_ADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_4;
+      load_buffer_4x4(input, in, input_stride, 0, 0, cfg->shift[0]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_4;
+      load_buffer_4x4(input, in, input_stride, 0, 0, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_4;
+      load_buffer_4x4(input, in, input_stride, 1, 0, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case DCT_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_4;
+      load_buffer_4x4(input, in, input_stride, 0, 1, cfg->shift[0]);
+      fdct4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case FLIPADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_4;
+      load_buffer_4x4(input, in, input_stride, 1, 1, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_4;
+      load_buffer_4x4(input, in, input_stride, 0, 1, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+    case FLIPADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_4;
+      load_buffer_4x4(input, in, input_stride, 1, 0, cfg->shift[0]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_col[2]);
+      fadst4x4_sse4_1(in, cfg->cos_bit_row[2]);
+      write_buffer_4x4(in, coeff);
+      break;
+#endif
+    default: assert(0);
+  }
+  (void)bd;
+}
+
+static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr,
+                                   int shift) {
+  __m128i u;
+  if (!flipud) {
+    in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  } else {
+    in[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = mm_reverse_epi16(in[0]);
+    in[1] = mm_reverse_epi16(in[1]);
+    in[2] = mm_reverse_epi16(in[2]);
+    in[3] = mm_reverse_epi16(in[3]);
+    in[4] = mm_reverse_epi16(in[4]);
+    in[5] = mm_reverse_epi16(in[5]);
+    in[6] = mm_reverse_epi16(in[6]);
+    in[7] = mm_reverse_epi16(in[7]);
+  }
+
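+  // Widen the 16-bit rows to 32-bit in place: rows 4..7 are expanded into
+  // in[8..15] first so that rows 0..3 can then be expanded into in[0..7]
+  // without being overwritten.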
+  u = _mm_unpackhi_epi64(in[4], in[4]);
+  in[8] = _mm_cvtepi16_epi32(in[4]);
+  in[9] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[5], in[5]);
+  in[10] = _mm_cvtepi16_epi32(in[5]);
+  in[11] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[6], in[6]);
+  in[12] = _mm_cvtepi16_epi32(in[6]);
+  in[13] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[7], in[7]);
+  in[14] = _mm_cvtepi16_epi32(in[7]);
+  in[15] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[3], in[3]);
+  in[6] = _mm_cvtepi16_epi32(in[3]);
+  in[7] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[2], in[2]);
+  in[4] = _mm_cvtepi16_epi32(in[2]);
+  in[5] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[1], in[1]);
+  in[2] = _mm_cvtepi16_epi32(in[1]);
+  in[3] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[0], in[0]);
+  in[0] = _mm_cvtepi16_epi32(in[0]);
+  in[1] = _mm_cvtepi16_epi32(u);
+
+  in[0] = _mm_slli_epi32(in[0], shift);
+  in[1] = _mm_slli_epi32(in[1], shift);
+  in[2] = _mm_slli_epi32(in[2], shift);
+  in[3] = _mm_slli_epi32(in[3], shift);
+  in[4] = _mm_slli_epi32(in[4], shift);
+  in[5] = _mm_slli_epi32(in[5], shift);
+  in[6] = _mm_slli_epi32(in[6], shift);
+  in[7] = _mm_slli_epi32(in[7], shift);
+
+  in[8] = _mm_slli_epi32(in[8], shift);
+  in[9] = _mm_slli_epi32(in[9], shift);
+  in[10] = _mm_slli_epi32(in[10], shift);
+  in[11] = _mm_slli_epi32(in[11], shift);
+  in[12] = _mm_slli_epi32(in[12], shift);
+  in[13] = _mm_slli_epi32(in[13], shift);
+  in[14] = _mm_slli_epi32(in[14], shift);
+  in[15] = _mm_slli_epi32(in[15], shift);
+}
+
+static INLINE void col_txfm_8x8_rounding(__m128i *in, int shift) {
+  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
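+  // Rounding right shift: x -> (x + (1 << (shift - 1))) >> shift for all 16
+  // vectors of the 8x8 block.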
+
+  in[0] = _mm_add_epi32(in[0], rounding);
+  in[1] = _mm_add_epi32(in[1], rounding);
+  in[2] = _mm_add_epi32(in[2], rounding);
+  in[3] = _mm_add_epi32(in[3], rounding);
+  in[4] = _mm_add_epi32(in[4], rounding);
+  in[5] = _mm_add_epi32(in[5], rounding);
+  in[6] = _mm_add_epi32(in[6], rounding);
+  in[7] = _mm_add_epi32(in[7], rounding);
+  in[8] = _mm_add_epi32(in[8], rounding);
+  in[9] = _mm_add_epi32(in[9], rounding);
+  in[10] = _mm_add_epi32(in[10], rounding);
+  in[11] = _mm_add_epi32(in[11], rounding);
+  in[12] = _mm_add_epi32(in[12], rounding);
+  in[13] = _mm_add_epi32(in[13], rounding);
+  in[14] = _mm_add_epi32(in[14], rounding);
+  in[15] = _mm_add_epi32(in[15], rounding);
+
+  in[0] = _mm_srai_epi32(in[0], shift);
+  in[1] = _mm_srai_epi32(in[1], shift);
+  in[2] = _mm_srai_epi32(in[2], shift);
+  in[3] = _mm_srai_epi32(in[3], shift);
+  in[4] = _mm_srai_epi32(in[4], shift);
+  in[5] = _mm_srai_epi32(in[5], shift);
+  in[6] = _mm_srai_epi32(in[6], shift);
+  in[7] = _mm_srai_epi32(in[7], shift);
+  in[8] = _mm_srai_epi32(in[8], shift);
+  in[9] = _mm_srai_epi32(in[9], shift);
+  in[10] = _mm_srai_epi32(in[10], shift);
+  in[11] = _mm_srai_epi32(in[11], shift);
+  in[12] = _mm_srai_epi32(in[12], shift);
+  in[13] = _mm_srai_epi32(in[13], shift);
+  in[14] = _mm_srai_epi32(in[14], shift);
+  in[15] = _mm_srai_epi32(in[15], shift);
+}
+
+static INLINE void write_buffer_8x8(const __m128i *res, tran_low_t *output) {
+  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
+  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
+  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
+  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
+
+  _mm_store_si128((__m128i *)(output + 4 * 4), res[4]);
+  _mm_store_si128((__m128i *)(output + 5 * 4), res[5]);
+  _mm_store_si128((__m128i *)(output + 6 * 4), res[6]);
+  _mm_store_si128((__m128i *)(output + 7 * 4), res[7]);
+
+  _mm_store_si128((__m128i *)(output + 8 * 4), res[8]);
+  _mm_store_si128((__m128i *)(output + 9 * 4), res[9]);
+  _mm_store_si128((__m128i *)(output + 10 * 4), res[10]);
+  _mm_store_si128((__m128i *)(output + 11 * 4), res[11]);
+
+  _mm_store_si128((__m128i *)(output + 12 * 4), res[12]);
+  _mm_store_si128((__m128i *)(output + 13 * 4), res[13]);
+  _mm_store_si128((__m128i *)(output + 14 * 4), res[14]);
+  _mm_store_si128((__m128i *)(output + 15 * 4), res[15]);
+}
+
+static void fdct8x8_sse4_1(__m128i *in, __m128i *out, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  __m128i u[8], v[8];
+
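+  // in[2i] / in[2i+1] hold the left / right four 32-bit columns of row i, so
+  // the even-index pass below transforms the left half of the block and the
+  // odd-index pass the right half.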
+  // Even 8 points 0, 2, ..., 14
+  // stage 0
+  // stage 1
+  u[0] = _mm_add_epi32(in[0], in[14]);
+  v[7] = _mm_sub_epi32(in[0], in[14]);  // v[7]
+  u[1] = _mm_add_epi32(in[2], in[12]);
+  u[6] = _mm_sub_epi32(in[2], in[12]);
+  u[2] = _mm_add_epi32(in[4], in[10]);
+  u[5] = _mm_sub_epi32(in[4], in[10]);
+  u[3] = _mm_add_epi32(in[6], in[8]);
+  v[4] = _mm_sub_epi32(in[6], in[8]);  // v[4]
+
+  // stage 2
+  v[0] = _mm_add_epi32(u[0], u[3]);
+  v[3] = _mm_sub_epi32(u[0], u[3]);
+  v[1] = _mm_add_epi32(u[1], u[2]);
+  v[2] = _mm_sub_epi32(u[1], u[2]);
+
+  v[5] = _mm_mullo_epi32(u[5], cospim32);
+  v[6] = _mm_mullo_epi32(u[6], cospi32);
+  v[5] = _mm_add_epi32(v[5], v[6]);
+  v[5] = _mm_add_epi32(v[5], rnding);
+  v[5] = _mm_srai_epi32(v[5], bit);
+
+  u[0] = _mm_mullo_epi32(u[5], cospi32);
+  v[6] = _mm_mullo_epi32(u[6], cospim32);
+  v[6] = _mm_sub_epi32(u[0], v[6]);
+  v[6] = _mm_add_epi32(v[6], rnding);
+  v[6] = _mm_srai_epi32(v[6], bit);
+
+  // stage 3
+  // type 0
+  v[0] = _mm_mullo_epi32(v[0], cospi32);
+  v[1] = _mm_mullo_epi32(v[1], cospi32);
+  u[0] = _mm_add_epi32(v[0], v[1]);
+  u[0] = _mm_add_epi32(u[0], rnding);
+  u[0] = _mm_srai_epi32(u[0], bit);
+
+  u[1] = _mm_sub_epi32(v[0], v[1]);
+  u[1] = _mm_add_epi32(u[1], rnding);
+  u[1] = _mm_srai_epi32(u[1], bit);
+
+  // type 1
+  v[0] = _mm_mullo_epi32(v[2], cospi48);
+  v[1] = _mm_mullo_epi32(v[3], cospi16);
+  u[2] = _mm_add_epi32(v[0], v[1]);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  v[0] = _mm_mullo_epi32(v[2], cospi16);
+  v[1] = _mm_mullo_epi32(v[3], cospi48);
+  u[3] = _mm_sub_epi32(v[1], v[0]);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  u[4] = _mm_add_epi32(v[4], v[5]);
+  u[5] = _mm_sub_epi32(v[4], v[5]);
+  u[6] = _mm_sub_epi32(v[7], v[6]);
+  u[7] = _mm_add_epi32(v[7], v[6]);
+
+  // stage 4
+  // stage 5
+  v[0] = _mm_mullo_epi32(u[4], cospi56);
+  v[1] = _mm_mullo_epi32(u[7], cospi8);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[2] = _mm_srai_epi32(v[0], bit);  // buf0[4]
+
+  v[0] = _mm_mullo_epi32(u[4], cospi8);
+  v[1] = _mm_mullo_epi32(u[7], cospi56);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[14] = _mm_srai_epi32(v[0], bit);  // buf0[7]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi24);
+  v[1] = _mm_mullo_epi32(u[6], cospi40);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[10] = _mm_srai_epi32(v[0], bit);  // buf0[5]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi40);
+  v[1] = _mm_mullo_epi32(u[6], cospi24);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[6] = _mm_srai_epi32(v[0], bit);  // buf0[6]
+
+  out[0] = u[0];   // buf0[0]
+  out[8] = u[1];   // buf0[1]
+  out[4] = u[2];   // buf0[2]
+  out[12] = u[3];  // buf0[3]
+
+  // Odd 8 points: 1, 3, ..., 15
+  // stage 0
+  // stage 1
+  u[0] = _mm_add_epi32(in[1], in[15]);
+  v[7] = _mm_sub_epi32(in[1], in[15]);  // v[7]
+  u[1] = _mm_add_epi32(in[3], in[13]);
+  u[6] = _mm_sub_epi32(in[3], in[13]);
+  u[2] = _mm_add_epi32(in[5], in[11]);
+  u[5] = _mm_sub_epi32(in[5], in[11]);
+  u[3] = _mm_add_epi32(in[7], in[9]);
+  v[4] = _mm_sub_epi32(in[7], in[9]);  // v[4]
+
+  // stage 2
+  v[0] = _mm_add_epi32(u[0], u[3]);
+  v[3] = _mm_sub_epi32(u[0], u[3]);
+  v[1] = _mm_add_epi32(u[1], u[2]);
+  v[2] = _mm_sub_epi32(u[1], u[2]);
+
+  v[5] = _mm_mullo_epi32(u[5], cospim32);
+  v[6] = _mm_mullo_epi32(u[6], cospi32);
+  v[5] = _mm_add_epi32(v[5], v[6]);
+  v[5] = _mm_add_epi32(v[5], rnding);
+  v[5] = _mm_srai_epi32(v[5], bit);
+
+  u[0] = _mm_mullo_epi32(u[5], cospi32);
+  v[6] = _mm_mullo_epi32(u[6], cospim32);
+  v[6] = _mm_sub_epi32(u[0], v[6]);
+  v[6] = _mm_add_epi32(v[6], rnding);
+  v[6] = _mm_srai_epi32(v[6], bit);
+
+  // stage 3
+  // type 0
+  v[0] = _mm_mullo_epi32(v[0], cospi32);
+  v[1] = _mm_mullo_epi32(v[1], cospi32);
+  u[0] = _mm_add_epi32(v[0], v[1]);
+  u[0] = _mm_add_epi32(u[0], rnding);
+  u[0] = _mm_srai_epi32(u[0], bit);
+
+  u[1] = _mm_sub_epi32(v[0], v[1]);
+  u[1] = _mm_add_epi32(u[1], rnding);
+  u[1] = _mm_srai_epi32(u[1], bit);
+
+  // type 1
+  v[0] = _mm_mullo_epi32(v[2], cospi48);
+  v[1] = _mm_mullo_epi32(v[3], cospi16);
+  u[2] = _mm_add_epi32(v[0], v[1]);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  v[0] = _mm_mullo_epi32(v[2], cospi16);
+  v[1] = _mm_mullo_epi32(v[3], cospi48);
+  u[3] = _mm_sub_epi32(v[1], v[0]);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  u[4] = _mm_add_epi32(v[4], v[5]);
+  u[5] = _mm_sub_epi32(v[4], v[5]);
+  u[6] = _mm_sub_epi32(v[7], v[6]);
+  u[7] = _mm_add_epi32(v[7], v[6]);
+
+  // stage 4
+  // stage 5
+  v[0] = _mm_mullo_epi32(u[4], cospi56);
+  v[1] = _mm_mullo_epi32(u[7], cospi8);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[3] = _mm_srai_epi32(v[0], bit);  // buf0[4]
+
+  v[0] = _mm_mullo_epi32(u[4], cospi8);
+  v[1] = _mm_mullo_epi32(u[7], cospi56);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[15] = _mm_srai_epi32(v[0], bit);  // buf0[7]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi24);
+  v[1] = _mm_mullo_epi32(u[6], cospi40);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[11] = _mm_srai_epi32(v[0], bit);  // buf0[5]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi40);
+  v[1] = _mm_mullo_epi32(u[6], cospi24);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[7] = _mm_srai_epi32(v[0], bit);  // buf0[6]
+
+  out[1] = u[0];   // buf0[0]
+  out[9] = u[1];   // buf0[1]
+  out[5] = u[2];   // buf0[2]
+  out[13] = u[3];  // buf0[3]
+}
+
+static void fadst8x8_sse4_1(__m128i *in, __m128i *out, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
+  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
+  const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
+  const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
+  const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
+  const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
+  const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
+  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  const __m128i kZero = _mm_setzero_si128();
+  __m128i u[8], v[8], x;
+
+  // Even 8 points: 0, 2, ..., 14
+  // stage 0
+  // stage 1
+  // stage 2
+  // (1)
+  u[0] = _mm_mullo_epi32(in[14], cospi4);
+  x = _mm_mullo_epi32(in[0], cospi60);
+  u[0] = _mm_add_epi32(u[0], x);
+  u[0] = _mm_add_epi32(u[0], rnding);
+  u[0] = _mm_srai_epi32(u[0], bit);
+
+  u[1] = _mm_mullo_epi32(in[14], cospi60);
+  x = _mm_mullo_epi32(in[0], cospi4);
+  u[1] = _mm_sub_epi32(u[1], x);
+  u[1] = _mm_add_epi32(u[1], rnding);
+  u[1] = _mm_srai_epi32(u[1], bit);
+
+  // (2)
+  u[2] = _mm_mullo_epi32(in[10], cospi20);
+  x = _mm_mullo_epi32(in[4], cospi44);
+  u[2] = _mm_add_epi32(u[2], x);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  u[3] = _mm_mullo_epi32(in[10], cospi44);
+  x = _mm_mullo_epi32(in[4], cospi20);
+  u[3] = _mm_sub_epi32(u[3], x);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  // (3)
+  u[4] = _mm_mullo_epi32(in[6], cospi36);
+  x = _mm_mullo_epi32(in[8], cospi28);
+  u[4] = _mm_add_epi32(u[4], x);
+  u[4] = _mm_add_epi32(u[4], rnding);
+  u[4] = _mm_srai_epi32(u[4], bit);
+
+  u[5] = _mm_mullo_epi32(in[6], cospi28);
+  x = _mm_mullo_epi32(in[8], cospi36);
+  u[5] = _mm_sub_epi32(u[5], x);
+  u[5] = _mm_add_epi32(u[5], rnding);
+  u[5] = _mm_srai_epi32(u[5], bit);
+
+  // (4)
+  u[6] = _mm_mullo_epi32(in[2], cospi52);
+  x = _mm_mullo_epi32(in[12], cospi12);
+  u[6] = _mm_add_epi32(u[6], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_mullo_epi32(in[2], cospi12);
+  x = _mm_mullo_epi32(in[12], cospi52);
+  u[7] = _mm_sub_epi32(u[7], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 3
+  v[0] = _mm_add_epi32(u[0], u[4]);
+  v[4] = _mm_sub_epi32(u[0], u[4]);
+  v[1] = _mm_add_epi32(u[1], u[5]);
+  v[5] = _mm_sub_epi32(u[1], u[5]);
+  v[2] = _mm_add_epi32(u[2], u[6]);
+  v[6] = _mm_sub_epi32(u[2], u[6]);
+  v[3] = _mm_add_epi32(u[3], u[7]);
+  v[7] = _mm_sub_epi32(u[3], u[7]);
+
+  // stage 4
+  u[0] = v[0];
+  u[1] = v[1];
+  u[2] = v[2];
+  u[3] = v[3];
+
+  u[4] = _mm_mullo_epi32(v[4], cospi16);
+  x = _mm_mullo_epi32(v[5], cospi48);
+  u[4] = _mm_add_epi32(u[4], x);
+  u[4] = _mm_add_epi32(u[4], rnding);
+  u[4] = _mm_srai_epi32(u[4], bit);
+
+  u[5] = _mm_mullo_epi32(v[4], cospi48);
+  x = _mm_mullo_epi32(v[5], cospi16);
+  u[5] = _mm_sub_epi32(u[5], x);
+  u[5] = _mm_add_epi32(u[5], rnding);
+  u[5] = _mm_srai_epi32(u[5], bit);
+
+  u[6] = _mm_mullo_epi32(v[6], cospim48);
+  x = _mm_mullo_epi32(v[7], cospi16);
+  u[6] = _mm_add_epi32(u[6], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_mullo_epi32(v[6], cospi16);
+  x = _mm_mullo_epi32(v[7], cospim48);
+  u[7] = _mm_sub_epi32(u[7], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 5
+  v[0] = _mm_add_epi32(u[0], u[2]);
+  v[2] = _mm_sub_epi32(u[0], u[2]);
+  v[1] = _mm_add_epi32(u[1], u[3]);
+  v[3] = _mm_sub_epi32(u[1], u[3]);
+  v[4] = _mm_add_epi32(u[4], u[6]);
+  v[6] = _mm_sub_epi32(u[4], u[6]);
+  v[5] = _mm_add_epi32(u[5], u[7]);
+  v[7] = _mm_sub_epi32(u[5], u[7]);
+
+  // stage 6
+  u[0] = v[0];
+  u[1] = v[1];
+  u[4] = v[4];
+  u[5] = v[5];
+
+  v[0] = _mm_mullo_epi32(v[2], cospi32);
+  x = _mm_mullo_epi32(v[3], cospi32);
+  u[2] = _mm_add_epi32(v[0], x);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  u[3] = _mm_sub_epi32(v[0], x);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  v[0] = _mm_mullo_epi32(v[6], cospi32);
+  x = _mm_mullo_epi32(v[7], cospi32);
+  u[6] = _mm_add_epi32(v[0], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_sub_epi32(v[0], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 7
+  out[0] = u[0];
+  out[2] = _mm_sub_epi32(kZero, u[4]);
+  out[4] = u[6];
+  out[6] = _mm_sub_epi32(kZero, u[2]);
+  out[8] = u[3];
+  out[10] = _mm_sub_epi32(kZero, u[7]);
+  out[12] = u[5];
+  out[14] = _mm_sub_epi32(kZero, u[1]);
+
+  // Odd 8 points: 1, 3, ..., 15
+  // stage 0
+  // stage 1
+  // stage 2
+  // (1)
+  u[0] = _mm_mullo_epi32(in[15], cospi4);
+  x = _mm_mullo_epi32(in[1], cospi60);
+  u[0] = _mm_add_epi32(u[0], x);
+  u[0] = _mm_add_epi32(u[0], rnding);
+  u[0] = _mm_srai_epi32(u[0], bit);
+
+  u[1] = _mm_mullo_epi32(in[15], cospi60);
+  x = _mm_mullo_epi32(in[1], cospi4);
+  u[1] = _mm_sub_epi32(u[1], x);
+  u[1] = _mm_add_epi32(u[1], rnding);
+  u[1] = _mm_srai_epi32(u[1], bit);
+
+  // (2)
+  u[2] = _mm_mullo_epi32(in[11], cospi20);
+  x = _mm_mullo_epi32(in[5], cospi44);
+  u[2] = _mm_add_epi32(u[2], x);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  u[3] = _mm_mullo_epi32(in[11], cospi44);
+  x = _mm_mullo_epi32(in[5], cospi20);
+  u[3] = _mm_sub_epi32(u[3], x);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  // (3)
+  u[4] = _mm_mullo_epi32(in[7], cospi36);
+  x = _mm_mullo_epi32(in[9], cospi28);
+  u[4] = _mm_add_epi32(u[4], x);
+  u[4] = _mm_add_epi32(u[4], rnding);
+  u[4] = _mm_srai_epi32(u[4], bit);
+
+  u[5] = _mm_mullo_epi32(in[7], cospi28);
+  x = _mm_mullo_epi32(in[9], cospi36);
+  u[5] = _mm_sub_epi32(u[5], x);
+  u[5] = _mm_add_epi32(u[5], rnding);
+  u[5] = _mm_srai_epi32(u[5], bit);
+
+  // (4)
+  u[6] = _mm_mullo_epi32(in[3], cospi52);
+  x = _mm_mullo_epi32(in[13], cospi12);
+  u[6] = _mm_add_epi32(u[6], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_mullo_epi32(in[3], cospi12);
+  x = _mm_mullo_epi32(in[13], cospi52);
+  u[7] = _mm_sub_epi32(u[7], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 3
+  v[0] = _mm_add_epi32(u[0], u[4]);
+  v[4] = _mm_sub_epi32(u[0], u[4]);
+  v[1] = _mm_add_epi32(u[1], u[5]);
+  v[5] = _mm_sub_epi32(u[1], u[5]);
+  v[2] = _mm_add_epi32(u[2], u[6]);
+  v[6] = _mm_sub_epi32(u[2], u[6]);
+  v[3] = _mm_add_epi32(u[3], u[7]);
+  v[7] = _mm_sub_epi32(u[3], u[7]);
+
+  // stage 4
+  u[0] = v[0];
+  u[1] = v[1];
+  u[2] = v[2];
+  u[3] = v[3];
+
+  u[4] = _mm_mullo_epi32(v[4], cospi16);
+  x = _mm_mullo_epi32(v[5], cospi48);
+  u[4] = _mm_add_epi32(u[4], x);
+  u[4] = _mm_add_epi32(u[4], rnding);
+  u[4] = _mm_srai_epi32(u[4], bit);
+
+  u[5] = _mm_mullo_epi32(v[4], cospi48);
+  x = _mm_mullo_epi32(v[5], cospi16);
+  u[5] = _mm_sub_epi32(u[5], x);
+  u[5] = _mm_add_epi32(u[5], rnding);
+  u[5] = _mm_srai_epi32(u[5], bit);
+
+  u[6] = _mm_mullo_epi32(v[6], cospim48);
+  x = _mm_mullo_epi32(v[7], cospi16);
+  u[6] = _mm_add_epi32(u[6], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_mullo_epi32(v[6], cospi16);
+  x = _mm_mullo_epi32(v[7], cospim48);
+  u[7] = _mm_sub_epi32(u[7], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 5
+  v[0] = _mm_add_epi32(u[0], u[2]);
+  v[2] = _mm_sub_epi32(u[0], u[2]);
+  v[1] = _mm_add_epi32(u[1], u[3]);
+  v[3] = _mm_sub_epi32(u[1], u[3]);
+  v[4] = _mm_add_epi32(u[4], u[6]);
+  v[6] = _mm_sub_epi32(u[4], u[6]);
+  v[5] = _mm_add_epi32(u[5], u[7]);
+  v[7] = _mm_sub_epi32(u[5], u[7]);
+
+  // stage 6
+  u[0] = v[0];
+  u[1] = v[1];
+  u[4] = v[4];
+  u[5] = v[5];
+
+  v[0] = _mm_mullo_epi32(v[2], cospi32);
+  x = _mm_mullo_epi32(v[3], cospi32);
+  u[2] = _mm_add_epi32(v[0], x);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  u[3] = _mm_sub_epi32(v[0], x);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  v[0] = _mm_mullo_epi32(v[6], cospi32);
+  x = _mm_mullo_epi32(v[7], cospi32);
+  u[6] = _mm_add_epi32(v[0], x);
+  u[6] = _mm_add_epi32(u[6], rnding);
+  u[6] = _mm_srai_epi32(u[6], bit);
+
+  u[7] = _mm_sub_epi32(v[0], x);
+  u[7] = _mm_add_epi32(u[7], rnding);
+  u[7] = _mm_srai_epi32(u[7], bit);
+
+  // stage 7
+  out[1] = u[0];
+  out[3] = _mm_sub_epi32(kZero, u[4]);
+  out[5] = u[6];
+  out[7] = _mm_sub_epi32(kZero, u[2]);
+  out[9] = u[3];
+  out[11] = _mm_sub_epi32(kZero, u[7]);
+  out[13] = u[5];
+  out[15] = _mm_sub_epi32(kZero, u[1]);
+}
+
+void vp10_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff,
+                                int stride, int tx_type, int bd) {
+  __m128i in[16], out[16];
+  const TXFM_2D_CFG *cfg = NULL;
+
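+  // Every tx_type follows the same pipeline: load with a pre-shift, column
+  // transform, intermediate rounding, transpose, row transform, transpose
+  // back, then store the coefficients.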
+  switch (tx_type) {
+    case DCT_DCT:
+      cfg = &fwd_txfm_2d_cfg_dct_dct_8;
+      load_buffer_8x8(input, in, stride, 0, 0, cfg->shift[0]);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case ADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_8;
+      load_buffer_8x8(input, in, stride, 0, 0, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case DCT_ADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_8;
+      load_buffer_8x8(input, in, stride, 0, 0, cfg->shift[0]);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case ADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_8;
+      load_buffer_8x8(input, in, stride, 0, 0, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+#if CONFIG_EXT_TX
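+    // The FLIPADST cases reuse the ADST configurations; the flipping itself
+    // is done by load_buffer_8x8 via its flipud/fliplr arguments.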
+    case FLIPADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_8;
+      load_buffer_8x8(input, in, stride, 1, 0, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case DCT_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_8;
+      load_buffer_8x8(input, in, stride, 0, 1, cfg->shift[0]);
+      fdct8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case FLIPADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_8;
+      load_buffer_8x8(input, in, stride, 1, 1, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case ADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_8;
+      load_buffer_8x8(input, in, stride, 0, 1, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+    case FLIPADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_8;
+      load_buffer_8x8(input, in, stride, 1, 0, cfg->shift[0]);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_col[2]);
+      col_txfm_8x8_rounding(out, -cfg->shift[1]);
+      transpose_8x8(out, in);
+      fadst8x8_sse4_1(in, out, cfg->cos_bit_row[2]);
+      transpose_8x8(out, in);
+      write_buffer_8x8(in, coeff);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+  (void)bd;
+}
+
+// Hybrid Transform 16x16
+
+static INLINE void convert_8x8_to_16x16(const __m128i *in, __m128i *out) {
+  int row_index = 0;
+  int dst_index = 0;
+  int src_index = 0;
+
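+  // Input layout (from load_buffer_16x16): topL = in[0..15],
+  // topR = in[16..31], botL = in[32..47], botR = in[48..63]; each 8x8 block
+  // is 8 rows x 2 registers of 4 int32. A 16x16 output row needs 4
+  // registers, stitched from one left-block row and one right-block row.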
+  // row 0, 1, .., 7
+  do {
+    out[dst_index] = in[src_index];
+    out[dst_index + 1] = in[src_index + 1];
+    out[dst_index + 2] = in[src_index + 16];
+    out[dst_index + 3] = in[src_index + 17];
+    dst_index += 4;
+    src_index += 2;
+    row_index += 1;
+  } while (row_index < 8);
+
+  // row 8, 9, ..., 15
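+  // Skip over the topR block so src_index lands on botL.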
+  src_index += 16;
+  do {
+    out[dst_index] = in[src_index];
+    out[dst_index + 1] = in[src_index + 1];
+    out[dst_index + 2] = in[src_index + 16];
+    out[dst_index + 3] = in[src_index + 17];
+    dst_index += 4;
+    src_index += 2;
+    row_index += 1;
+  } while (row_index < 16);
+}
+
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *out,
+                                     int stride, int flipud, int fliplr,
+                                     int shift) {
+  __m128i in[64];
+  // Load 4 8x8 blocks
+  const int16_t *topL = input;
+  const int16_t *topR = input + 8;
+  const int16_t *botL = input + 8 * stride;
+  const int16_t *botR = input + 8 * stride + 8;
+
+  const int16_t *tmp;
+
+  if (flipud) {
+    // Swap the top and bottom blocks of the left column
+    tmp = topL;
+    topL = botL;
+    botL = tmp;
+    // Swap the top and bottom blocks of the right column
+    tmp = topR;
+    topR = botR;
+    botR = tmp;
+  }
+
+  if (fliplr) {
+    // Swap the left and right blocks of the top row
+    tmp = topL;
+    topL = topR;
+    topR = tmp;
+    // Swap the left and right blocks of the bottom row
+    tmp = botL;
+    botL = botR;
+    botR = tmp;
+  }
+
+  // load first 8 columns
+  load_buffer_8x8(topL, &in[0], stride, flipud, fliplr, shift);
+  load_buffer_8x8(botL, &in[32], stride, flipud, fliplr, shift);
+
+  // load second 8 columns
+  load_buffer_8x8(topR, &in[16], stride, flipud, fliplr, shift);
+  load_buffer_8x8(botR, &in[48], stride, flipud, fliplr, shift);
+
+  convert_8x8_to_16x16(in, out);
+}
+
+static void fdct16x16_sse4_1(__m128i *in, __m128i *out, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
+  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
+  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
+  const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
+  const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
+  const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
+  const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
+  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+  const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
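+  // Adding rnding (= 1 << (bit - 1)) before the arithmetic shift by 'bit'
+  // rounds the fixed-point cosine products to nearest.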
+  __m128i u[16], v[16], x;
+  const int col_num = 4;
+  int col;
+
+  // Process columns 0..15 in four groups of four (one __m128i per group)
+  for (col = 0; col < col_num; ++col) {
+    // stage 0
+    // stage 1
+    u[0] = _mm_add_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+    u[15] = _mm_sub_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+    u[1] = _mm_add_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+    u[14] = _mm_sub_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+    u[2] = _mm_add_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+    u[13] = _mm_sub_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+    u[3] = _mm_add_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+    u[12] = _mm_sub_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+    u[4] = _mm_add_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+    u[11] = _mm_sub_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+    u[5] = _mm_add_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+    u[10] = _mm_sub_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+    u[6] = _mm_add_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+    u[9] = _mm_sub_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+    u[7] = _mm_add_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+    u[8] = _mm_sub_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+
+    // stage 2
+    v[0] = _mm_add_epi32(u[0], u[7]);
+    v[7] = _mm_sub_epi32(u[0], u[7]);
+    v[1] = _mm_add_epi32(u[1], u[6]);
+    v[6] = _mm_sub_epi32(u[1], u[6]);
+    v[2] = _mm_add_epi32(u[2], u[5]);
+    v[5] = _mm_sub_epi32(u[2], u[5]);
+    v[3] = _mm_add_epi32(u[3], u[4]);
+    v[4] = _mm_sub_epi32(u[3], u[4]);
+    v[8] = u[8];
+    v[9] = u[9];
+
+    v[10] = _mm_mullo_epi32(u[10], cospim32);
+    x = _mm_mullo_epi32(u[13], cospi32);
+    v[10] = _mm_add_epi32(v[10], x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[13] = _mm_mullo_epi32(u[10], cospi32);
+    x = _mm_mullo_epi32(u[13], cospim32);
+    v[13] = _mm_sub_epi32(v[13], x);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[11] = _mm_mullo_epi32(u[11], cospim32);
+    x = _mm_mullo_epi32(u[12], cospi32);
+    v[11] = _mm_add_epi32(v[11], x);
+    v[11] = _mm_add_epi32(v[11], rnding);
+    v[11] = _mm_srai_epi32(v[11], bit);
+
+    v[12] = _mm_mullo_epi32(u[11], cospi32);
+    x = _mm_mullo_epi32(u[12], cospim32);
+    v[12] = _mm_sub_epi32(v[12], x);
+    v[12] = _mm_add_epi32(v[12], rnding);
+    v[12] = _mm_srai_epi32(v[12], bit);
+    v[14] = u[14];
+    v[15] = u[15];
+
+    // stage 3
+    u[0] = _mm_add_epi32(v[0], v[3]);
+    u[3] = _mm_sub_epi32(v[0], v[3]);
+    u[1] = _mm_add_epi32(v[1], v[2]);
+    u[2] = _mm_sub_epi32(v[1], v[2]);
+    u[4] = v[4];
+
+    u[5] = _mm_mullo_epi32(v[5], cospim32);
+    x = _mm_mullo_epi32(v[6], cospi32);
+    u[5] = _mm_add_epi32(u[5], x);
+    u[5] = _mm_add_epi32(u[5], rnding);
+    u[5] = _mm_srai_epi32(u[5], bit);
+
+    u[6] = _mm_mullo_epi32(v[5], cospi32);
+    x = _mm_mullo_epi32(v[6], cospim32);
+    u[6] = _mm_sub_epi32(u[6], x);
+    u[6] = _mm_add_epi32(u[6], rnding);
+    u[6] = _mm_srai_epi32(u[6], bit);
+
+    u[7] = v[7];
+    u[8] = _mm_add_epi32(v[8], v[11]);
+    u[11] = _mm_sub_epi32(v[8], v[11]);
+    u[9] = _mm_add_epi32(v[9], v[10]);
+    u[10] = _mm_sub_epi32(v[9], v[10]);
+    u[12] = _mm_sub_epi32(v[15], v[12]);
+    u[15] = _mm_add_epi32(v[15], v[12]);
+    u[13] = _mm_sub_epi32(v[14], v[13]);
+    u[14] = _mm_add_epi32(v[14], v[13]);
+
+    // stage 4
+    u[0] = _mm_mullo_epi32(u[0], cospi32);
+    u[1] = _mm_mullo_epi32(u[1], cospi32);
+    v[0] = _mm_add_epi32(u[0], u[1]);
+    v[0] = _mm_add_epi32(v[0], rnding);
+    v[0] = _mm_srai_epi32(v[0], bit);
+
+    v[1] = _mm_sub_epi32(u[0], u[1]);
+    v[1] = _mm_add_epi32(v[1], rnding);
+    v[1] = _mm_srai_epi32(v[1], bit);
+
+    v[2] = _mm_mullo_epi32(u[2], cospi48);
+    x = _mm_mullo_epi32(u[3], cospi16);
+    v[2] = _mm_add_epi32(v[2], x);
+    v[2] = _mm_add_epi32(v[2], rnding);
+    v[2] = _mm_srai_epi32(v[2], bit);
+
+    v[3] = _mm_mullo_epi32(u[2], cospi16);
+    x = _mm_mullo_epi32(u[3], cospi48);
+    v[3] = _mm_sub_epi32(x, v[3]);
+    v[3] = _mm_add_epi32(v[3], rnding);
+    v[3] = _mm_srai_epi32(v[3], bit);
+
+    v[4] = _mm_add_epi32(u[4], u[5]);
+    v[5] = _mm_sub_epi32(u[4], u[5]);
+    v[6] = _mm_sub_epi32(u[7], u[6]);
+    v[7] = _mm_add_epi32(u[7], u[6]);
+    v[8] = u[8];
+
+    v[9] = _mm_mullo_epi32(u[9], cospim16);
+    x = _mm_mullo_epi32(u[14], cospi48);
+    v[9] = _mm_add_epi32(v[9], x);
+    v[9] = _mm_add_epi32(v[9], rnding);
+    v[9] = _mm_srai_epi32(v[9], bit);
+
+    v[14] = _mm_mullo_epi32(u[9], cospi48);
+    x = _mm_mullo_epi32(u[14], cospim16);
+    v[14] = _mm_sub_epi32(v[14], x);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[10] = _mm_mullo_epi32(u[10], cospim48);
+    x = _mm_mullo_epi32(u[13], cospim16);
+    v[10] = _mm_add_epi32(v[10], x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[13] = _mm_mullo_epi32(u[10], cospim16);
+    x = _mm_mullo_epi32(u[13], cospim48);
+    v[13] = _mm_sub_epi32(v[13], x);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[11] = u[11];
+    v[12] = u[12];
+    v[15] = u[15];
+
+    // stage 5
+    u[0] = v[0];
+    u[1] = v[1];
+    u[2] = v[2];
+    u[3] = v[3];
+
+    u[4] = _mm_mullo_epi32(v[4], cospi56);
+    x = _mm_mullo_epi32(v[7], cospi8);
+    u[4] = _mm_add_epi32(u[4], x);
+    u[4] = _mm_add_epi32(u[4], rnding);
+    u[4] = _mm_srai_epi32(u[4], bit);
+
+    u[7] = _mm_mullo_epi32(v[4], cospi8);
+    x = _mm_mullo_epi32(v[7], cospi56);
+    u[7] = _mm_sub_epi32(x, u[7]);
+    u[7] = _mm_add_epi32(u[7], rnding);
+    u[7] = _mm_srai_epi32(u[7], bit);
+
+    u[5] = _mm_mullo_epi32(v[5], cospi24);
+    x = _mm_mullo_epi32(v[6], cospi40);
+    u[5] = _mm_add_epi32(u[5], x);
+    u[5] = _mm_add_epi32(u[5], rnding);
+    u[5] = _mm_srai_epi32(u[5], bit);
+
+    u[6] = _mm_mullo_epi32(v[5], cospi40);
+    x = _mm_mullo_epi32(v[6], cospi24);
+    u[6] = _mm_sub_epi32(x, u[6]);
+    u[6] = _mm_add_epi32(u[6], rnding);
+    u[6] = _mm_srai_epi32(u[6], bit);
+
+    u[8] = _mm_add_epi32(v[8], v[9]);
+    u[9] = _mm_sub_epi32(v[8], v[9]);
+    u[10] = _mm_sub_epi32(v[11], v[10]);
+    u[11] = _mm_add_epi32(v[11], v[10]);
+    u[12] = _mm_add_epi32(v[12], v[13]);
+    u[13] = _mm_sub_epi32(v[12], v[13]);
+    u[14] = _mm_sub_epi32(v[15], v[14]);
+    u[15] = _mm_add_epi32(v[15], v[14]);
+
+    // stage 6
+    v[0] = u[0];
+    v[1] = u[1];
+    v[2] = u[2];
+    v[3] = u[3];
+    v[4] = u[4];
+    v[5] = u[5];
+    v[6] = u[6];
+    v[7] = u[7];
+
+    v[8] = _mm_mullo_epi32(u[8], cospi60);
+    x = _mm_mullo_epi32(u[15], cospi4);
+    v[8] = _mm_add_epi32(v[8], x);
+    v[8] = _mm_add_epi32(v[8], rnding);
+    v[8] = _mm_srai_epi32(v[8], bit);
+
+    v[15] = _mm_mullo_epi32(u[8], cospi4);
+    x = _mm_mullo_epi32(u[15], cospi60);
+    v[15] = _mm_sub_epi32(x, v[15]);
+    v[15] = _mm_add_epi32(v[15], rnding);
+    v[15] = _mm_srai_epi32(v[15], bit);
+
+    v[9] = _mm_mullo_epi32(u[9], cospi28);
+    x = _mm_mullo_epi32(u[14], cospi36);
+    v[9] = _mm_add_epi32(v[9], x);
+    v[9] = _mm_add_epi32(v[9], rnding);
+    v[9] = _mm_srai_epi32(v[9], bit);
+
+    v[14] = _mm_mullo_epi32(u[9], cospi36);
+    x = _mm_mullo_epi32(u[14], cospi28);
+    v[14] = _mm_sub_epi32(x, v[14]);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[10] = _mm_mullo_epi32(u[10], cospi44);
+    x = _mm_mullo_epi32(u[13], cospi20);
+    v[10] = _mm_add_epi32(v[10], x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[13] = _mm_mullo_epi32(u[10], cospi20);
+    x = _mm_mullo_epi32(u[13], cospi44);
+    v[13] = _mm_sub_epi32(x, v[13]);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[11] = _mm_mullo_epi32(u[11], cospi12);
+    x = _mm_mullo_epi32(u[12], cospi52);
+    v[11] = _mm_add_epi32(v[11], x);
+    v[11] = _mm_add_epi32(v[11], rnding);
+    v[11] = _mm_srai_epi32(v[11], bit);
+
+    v[12] = _mm_mullo_epi32(u[11], cospi52);
+    x = _mm_mullo_epi32(u[12], cospi12);
+    v[12] = _mm_sub_epi32(x, v[12]);
+    v[12] = _mm_add_epi32(v[12], rnding);
+    v[12] = _mm_srai_epi32(v[12], bit);
+
+    out[0 * col_num + col] = v[0];
+    out[1 * col_num + col] = v[8];
+    out[2 * col_num + col] = v[4];
+    out[3 * col_num + col] = v[12];
+    out[4 * col_num + col] = v[2];
+    out[5 * col_num + col] = v[10];
+    out[6 * col_num + col] = v[6];
+    out[7 * col_num + col] = v[14];
+    out[8 * col_num + col] = v[1];
+    out[9 * col_num + col] = v[9];
+    out[10 * col_num + col] = v[5];
+    out[11 * col_num + col] = v[13];
+    out[12 * col_num + col] = v[3];
+    out[13 * col_num + col] = v[11];
+    out[14 * col_num + col] = v[7];
+    out[15 * col_num + col] = v[15];
+  }
+}
+
+static void fadst16x16_sse4_1(__m128i *in, __m128i *out, int bit) {
+  const int32_t *cospi = cospi_arr[bit - cos_bit_min];
+  const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
+  const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
+  const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
+  const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
+  const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
+  const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
+  const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
+  const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
+  const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
+  const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
+  const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
+  const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
+  const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
+  const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
+  const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
+  const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
+  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+  const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
+  const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  __m128i u[16], v[16], x, y;
+  const int col_num = 4;
+  int col;
+
+  // Process columns 0..15 in four groups of four (one __m128i per group)
+  for (col = 0; col < col_num; ++col) {
+    // stage 0
+    // stage 1
+    // stage 2
+    v[0] = _mm_mullo_epi32(in[15 * col_num + col], cospi2);
+    x = _mm_mullo_epi32(in[0 * col_num + col], cospi62);
+    v[0] = _mm_add_epi32(v[0], x);
+    v[0] = _mm_add_epi32(v[0], rnding);
+    v[0] = _mm_srai_epi32(v[0], bit);
+
+    v[1] = _mm_mullo_epi32(in[15 * col_num + col], cospi62);
+    x = _mm_mullo_epi32(in[0 * col_num + col], cospi2);
+    v[1] = _mm_sub_epi32(v[1], x);
+    v[1] = _mm_add_epi32(v[1], rnding);
+    v[1] = _mm_srai_epi32(v[1], bit);
+
+    v[2] = _mm_mullo_epi32(in[13 * col_num + col], cospi10);
+    x = _mm_mullo_epi32(in[2 * col_num + col], cospi54);
+    v[2] = _mm_add_epi32(v[2], x);
+    v[2] = _mm_add_epi32(v[2], rnding);
+    v[2] = _mm_srai_epi32(v[2], bit);
+
+    v[3] = _mm_mullo_epi32(in[13 * col_num + col], cospi54);
+    x = _mm_mullo_epi32(in[2 * col_num + col], cospi10);
+    v[3] = _mm_sub_epi32(v[3], x);
+    v[3] = _mm_add_epi32(v[3], rnding);
+    v[3] = _mm_srai_epi32(v[3], bit);
+
+    v[4] = _mm_mullo_epi32(in[11 * col_num + col], cospi18);
+    x = _mm_mullo_epi32(in[4 * col_num + col], cospi46);
+    v[4] = _mm_add_epi32(v[4], x);
+    v[4] = _mm_add_epi32(v[4], rnding);
+    v[4] = _mm_srai_epi32(v[4], bit);
+
+    v[5] = _mm_mullo_epi32(in[11 * col_num + col], cospi46);
+    x = _mm_mullo_epi32(in[4 * col_num + col], cospi18);
+    v[5] = _mm_sub_epi32(v[5], x);
+    v[5] = _mm_add_epi32(v[5], rnding);
+    v[5] = _mm_srai_epi32(v[5], bit);
+
+    v[6] = _mm_mullo_epi32(in[9 * col_num + col], cospi26);
+    x = _mm_mullo_epi32(in[6 * col_num + col], cospi38);
+    v[6] = _mm_add_epi32(v[6], x);
+    v[6] = _mm_add_epi32(v[6], rnding);
+    v[6] = _mm_srai_epi32(v[6], bit);
+
+    v[7] = _mm_mullo_epi32(in[9 * col_num + col], cospi38);
+    x = _mm_mullo_epi32(in[6 * col_num + col], cospi26);
+    v[7] = _mm_sub_epi32(v[7], x);
+    v[7] = _mm_add_epi32(v[7], rnding);
+    v[7] = _mm_srai_epi32(v[7], bit);
+
+    v[8] = _mm_mullo_epi32(in[7 * col_num + col], cospi34);
+    x = _mm_mullo_epi32(in[8 * col_num + col], cospi30);
+    v[8] = _mm_add_epi32(v[8], x);
+    v[8] = _mm_add_epi32(v[8], rnding);
+    v[8] = _mm_srai_epi32(v[8], bit);
+
+    v[9] = _mm_mullo_epi32(in[7 * col_num + col], cospi30);
+    x = _mm_mullo_epi32(in[8 * col_num + col], cospi34);
+    v[9] = _mm_sub_epi32(v[9], x);
+    v[9] = _mm_add_epi32(v[9], rnding);
+    v[9] = _mm_srai_epi32(v[9], bit);
+
+    v[10] = _mm_mullo_epi32(in[5 * col_num + col], cospi42);
+    x = _mm_mullo_epi32(in[10 * col_num + col], cospi22);
+    v[10] = _mm_add_epi32(v[10], x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[11] = _mm_mullo_epi32(in[5 * col_num + col], cospi22);
+    x = _mm_mullo_epi32(in[10 * col_num + col], cospi42);
+    v[11] = _mm_sub_epi32(v[11], x);
+    v[11] = _mm_add_epi32(v[11], rnding);
+    v[11] = _mm_srai_epi32(v[11], bit);
+
+    v[12] = _mm_mullo_epi32(in[3 * col_num + col], cospi50);
+    x = _mm_mullo_epi32(in[12 * col_num + col], cospi14);
+    v[12] = _mm_add_epi32(v[12], x);
+    v[12] = _mm_add_epi32(v[12], rnding);
+    v[12] = _mm_srai_epi32(v[12], bit);
+
+    v[13] = _mm_mullo_epi32(in[3 * col_num + col], cospi14);
+    x = _mm_mullo_epi32(in[12 * col_num + col], cospi50);
+    v[13] = _mm_sub_epi32(v[13], x);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[14] = _mm_mullo_epi32(in[1 * col_num + col], cospi58);
+    x = _mm_mullo_epi32(in[14 * col_num + col], cospi6);
+    v[14] = _mm_add_epi32(v[14], x);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[15] = _mm_mullo_epi32(in[1 * col_num + col], cospi6);
+    x = _mm_mullo_epi32(in[14 * col_num + col], cospi58);
+    v[15] = _mm_sub_epi32(v[15], x);
+    v[15] = _mm_add_epi32(v[15], rnding);
+    v[15] = _mm_srai_epi32(v[15], bit);
+
+    // stage 3
+    u[0] = _mm_add_epi32(v[0], v[8]);
+    u[8] = _mm_sub_epi32(v[0], v[8]);
+    u[1] = _mm_add_epi32(v[1], v[9]);
+    u[9] = _mm_sub_epi32(v[1], v[9]);
+    u[2] = _mm_add_epi32(v[2], v[10]);
+    u[10] = _mm_sub_epi32(v[2], v[10]);
+    u[3] = _mm_add_epi32(v[3], v[11]);
+    u[11] = _mm_sub_epi32(v[3], v[11]);
+    u[4] = _mm_add_epi32(v[4], v[12]);
+    u[12] = _mm_sub_epi32(v[4], v[12]);
+    u[5] = _mm_add_epi32(v[5], v[13]);
+    u[13] = _mm_sub_epi32(v[5], v[13]);
+    u[6] = _mm_add_epi32(v[6], v[14]);
+    u[14] = _mm_sub_epi32(v[6], v[14]);
+    u[7] = _mm_add_epi32(v[7], v[15]);
+    u[15] = _mm_sub_epi32(v[7], v[15]);
+
+    // stage 4
+    v[0] = u[0];
+    v[1] = u[1];
+    v[2] = u[2];
+    v[3] = u[3];
+    v[4] = u[4];
+    v[5] = u[5];
+    v[6] = u[6];
+    v[7] = u[7];
+
+    v[8] = _mm_mullo_epi32(u[8], cospi8);
+    x = _mm_mullo_epi32(u[9], cospi56);
+    v[8] = _mm_add_epi32(v[8], x);
+    v[8] = _mm_add_epi32(v[8], rnding);
+    v[8] = _mm_srai_epi32(v[8], bit);
+
+    v[9] = _mm_mullo_epi32(u[8], cospi56);
+    x = _mm_mullo_epi32(u[9], cospi8);
+    v[9] = _mm_sub_epi32(v[9], x);
+    v[9] = _mm_add_epi32(v[9], rnding);
+    v[9] = _mm_srai_epi32(v[9], bit);
+
+    v[10] = _mm_mullo_epi32(u[10], cospi40);
+    x = _mm_mullo_epi32(u[11], cospi24);
+    v[10] = _mm_add_epi32(v[10], x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[11] = _mm_mullo_epi32(u[10], cospi24);
+    x = _mm_mullo_epi32(u[11], cospi40);
+    v[11] = _mm_sub_epi32(v[11], x);
+    v[11] = _mm_add_epi32(v[11], rnding);
+    v[11] = _mm_srai_epi32(v[11], bit);
+
+    v[12] = _mm_mullo_epi32(u[12], cospim56);
+    x = _mm_mullo_epi32(u[13], cospi8);
+    v[12] = _mm_add_epi32(v[12], x);
+    v[12] = _mm_add_epi32(v[12], rnding);
+    v[12] = _mm_srai_epi32(v[12], bit);
+
+    v[13] = _mm_mullo_epi32(u[12], cospi8);
+    x = _mm_mullo_epi32(u[13], cospim56);
+    v[13] = _mm_sub_epi32(v[13], x);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[14] = _mm_mullo_epi32(u[14], cospim24);
+    x = _mm_mullo_epi32(u[15], cospi40);
+    v[14] = _mm_add_epi32(v[14], x);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[15] = _mm_mullo_epi32(u[14], cospi40);
+    x = _mm_mullo_epi32(u[15], cospim24);
+    v[15] = _mm_sub_epi32(v[15], x);
+    v[15] = _mm_add_epi32(v[15], rnding);
+    v[15] = _mm_srai_epi32(v[15], bit);
+
+    // stage 5
+    u[0] = _mm_add_epi32(v[0], v[4]);
+    u[4] = _mm_sub_epi32(v[0], v[4]);
+    u[1] = _mm_add_epi32(v[1], v[5]);
+    u[5] = _mm_sub_epi32(v[1], v[5]);
+    u[2] = _mm_add_epi32(v[2], v[6]);
+    u[6] = _mm_sub_epi32(v[2], v[6]);
+    u[3] = _mm_add_epi32(v[3], v[7]);
+    u[7] = _mm_sub_epi32(v[3], v[7]);
+    u[8] = _mm_add_epi32(v[8], v[12]);
+    u[12] = _mm_sub_epi32(v[8], v[12]);
+    u[9] = _mm_add_epi32(v[9], v[13]);
+    u[13] = _mm_sub_epi32(v[9], v[13]);
+    u[10] = _mm_add_epi32(v[10], v[14]);
+    u[14] = _mm_sub_epi32(v[10], v[14]);
+    u[11] = _mm_add_epi32(v[11], v[15]);
+    u[15] = _mm_sub_epi32(v[11], v[15]);
+
+    // stage 6
+    v[0] = u[0];
+    v[1] = u[1];
+    v[2] = u[2];
+    v[3] = u[3];
+
+    v[4] = _mm_mullo_epi32(u[4], cospi16);
+    x = _mm_mullo_epi32(u[5], cospi48);
+    v[4] = _mm_add_epi32(v[4], x);
+    v[4] = _mm_add_epi32(v[4], rnding);
+    v[4] = _mm_srai_epi32(v[4], bit);
+
+    v[5] = _mm_mullo_epi32(u[4], cospi48);
+    x = _mm_mullo_epi32(u[5], cospi16);
+    v[5] = _mm_sub_epi32(v[5], x);
+    v[5] = _mm_add_epi32(v[5], rnding);
+    v[5] = _mm_srai_epi32(v[5], bit);
+
+    v[6] = _mm_mullo_epi32(u[6], cospim48);
+    x = _mm_mullo_epi32(u[7], cospi16);
+    v[6] = _mm_add_epi32(v[6], x);
+    v[6] = _mm_add_epi32(v[6], rnding);
+    v[6] = _mm_srai_epi32(v[6], bit);
+
+    v[7] = _mm_mullo_epi32(u[6], cospi16);
+    x = _mm_mullo_epi32(u[7], cospim48);
+    v[7] = _mm_sub_epi32(v[7], x);
+    v[7] = _mm_add_epi32(v[7], rnding);
+    v[7] = _mm_srai_epi32(v[7], bit);
+
+    v[8] = u[8];
+    v[9] = u[9];
+    v[10] = u[10];
+    v[11] = u[11];
+
+    v[12] = _mm_mullo_epi32(u[12], cospi16);
+    x = _mm_mullo_epi32(u[13], cospi48);
+    v[12] = _mm_add_epi32(v[12], x);
+    v[12] = _mm_add_epi32(v[12], rnding);
+    v[12] = _mm_srai_epi32(v[12], bit);
+
+    v[13] = _mm_mullo_epi32(u[12], cospi48);
+    x = _mm_mullo_epi32(u[13], cospi16);
+    v[13] = _mm_sub_epi32(v[13], x);
+    v[13] = _mm_add_epi32(v[13], rnding);
+    v[13] = _mm_srai_epi32(v[13], bit);
+
+    v[14] = _mm_mullo_epi32(u[14], cospim48);
+    x = _mm_mullo_epi32(u[15], cospi16);
+    v[14] = _mm_add_epi32(v[14], x);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[15] = _mm_mullo_epi32(u[14], cospi16);
+    x = _mm_mullo_epi32(u[15], cospim48);
+    v[15] = _mm_sub_epi32(v[15], x);
+    v[15] = _mm_add_epi32(v[15], rnding);
+    v[15] = _mm_srai_epi32(v[15], bit);
+
+    // stage 7
+    u[0] = _mm_add_epi32(v[0], v[2]);
+    u[2] = _mm_sub_epi32(v[0], v[2]);
+    u[1] = _mm_add_epi32(v[1], v[3]);
+    u[3] = _mm_sub_epi32(v[1], v[3]);
+    u[4] = _mm_add_epi32(v[4], v[6]);
+    u[6] = _mm_sub_epi32(v[4], v[6]);
+    u[5] = _mm_add_epi32(v[5], v[7]);
+    u[7] = _mm_sub_epi32(v[5], v[7]);
+    u[8] = _mm_add_epi32(v[8], v[10]);
+    u[10] = _mm_sub_epi32(v[8], v[10]);
+    u[9] = _mm_add_epi32(v[9], v[11]);
+    u[11] = _mm_sub_epi32(v[9], v[11]);
+    u[12] = _mm_add_epi32(v[12], v[14]);
+    u[14] = _mm_sub_epi32(v[12], v[14]);
+    u[13] = _mm_add_epi32(v[13], v[15]);
+    u[15] = _mm_sub_epi32(v[13], v[15]);
+
+    // stage 8
+    v[0] = u[0];
+    v[1] = u[1];
+
+    y = _mm_mullo_epi32(u[2], cospi32);
+    x = _mm_mullo_epi32(u[3], cospi32);
+    v[2] = _mm_add_epi32(y, x);
+    v[2] = _mm_add_epi32(v[2], rnding);
+    v[2] = _mm_srai_epi32(v[2], bit);
+
+    v[3] = _mm_sub_epi32(y, x);
+    v[3] = _mm_add_epi32(v[3], rnding);
+    v[3] = _mm_srai_epi32(v[3], bit);
+
+    v[4] = u[4];
+    v[5] = u[5];
+
+    y = _mm_mullo_epi32(u[6], cospi32);
+    x = _mm_mullo_epi32(u[7], cospi32);
+    v[6] = _mm_add_epi32(y, x);
+    v[6] = _mm_add_epi32(v[6], rnding);
+    v[6] = _mm_srai_epi32(v[6], bit);
+
+    v[7] = _mm_sub_epi32(y, x);
+    v[7] = _mm_add_epi32(v[7], rnding);
+    v[7] = _mm_srai_epi32(v[7], bit);
+
+    v[8] = u[8];
+    v[9] = u[9];
+
+    y = _mm_mullo_epi32(u[10], cospi32);
+    x = _mm_mullo_epi32(u[11], cospi32);
+    v[10] = _mm_add_epi32(y, x);
+    v[10] = _mm_add_epi32(v[10], rnding);
+    v[10] = _mm_srai_epi32(v[10], bit);
+
+    v[11] = _mm_sub_epi32(y, x);
+    v[11] = _mm_add_epi32(v[11], rnding);
+    v[11] = _mm_srai_epi32(v[11], bit);
+
+    v[12] = u[12];
+    v[13] = u[13];
+
+    y = _mm_mullo_epi32(u[14], cospi32);
+    x = _mm_mullo_epi32(u[15], cospi32);
+    v[14] = _mm_add_epi32(y, x);
+    v[14] = _mm_add_epi32(v[14], rnding);
+    v[14] = _mm_srai_epi32(v[14], bit);
+
+    v[15] = _mm_sub_epi32(y, x);
+    v[15] = _mm_add_epi32(v[15], rnding);
+    v[15] = _mm_srai_epi32(v[15], bit);
+
+    // stage 9
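+    // The 0 - x subtractions realize the sign flips of the final ADST
+    // output permutation.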
+    out[0 * col_num + col] = v[0];
+    out[1 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[8]);
+    out[2 * col_num + col] = v[12];
+    out[3 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[4]);
+    out[4 * col_num + col] = v[6];
+    out[5 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[14]);
+    out[6 * col_num + col] = v[10];
+    out[7 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[2]);
+    out[8 * col_num + col] = v[3];
+    out[9 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[11]);
+    out[10 * col_num + col] = v[15];
+    out[11 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[7]);
+    out[12 * col_num + col] = v[5];
+    out[13 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[13]);
+    out[14 * col_num + col] = v[9];
+    out[15 * col_num + col] = _mm_sub_epi32(_mm_set1_epi32(0), v[1]);
+  }
+}
+
+static void col_txfm_16x16_rounding(__m128i *in, int shift) {
+  // Note: the 16x16 rounding is split into four 8x8 roundings rather than
+  // processed as four columns.
+  col_txfm_8x8_rounding(&in[0], shift);
+  col_txfm_8x8_rounding(&in[16], shift);
+  col_txfm_8x8_rounding(&in[32], shift);
+  col_txfm_8x8_rounding(&in[48], shift);
+}
+
+static void write_buffer_16x16(const __m128i *in, tran_low_t *output) {
+  const int size_8x8 = 16 * 4;
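+  // 64 coefficients per 8x8 sub-block; the four sub-blocks are written out
+  // sequentially.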
+  write_buffer_8x8(&in[0], output);
+  output += size_8x8;
+  write_buffer_8x8(&in[16], output);
+  output += size_8x8;
+  write_buffer_8x8(&in[32], output);
+  output += size_8x8;
+  write_buffer_8x8(&in[48], output);
+}
+
+void vp10_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
+                                  int stride, int tx_type, int bd) {
+  __m128i in[64], out[64];
+  const TXFM_2D_CFG *cfg = NULL;
+
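+  // Same pipeline as the 8x8 version (load with pre-shift, column transform,
+  // rounding, transpose, row transform, transpose, store), over 64 registers.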
+  switch (tx_type) {
+    case DCT_DCT:
+      cfg = &fwd_txfm_2d_cfg_dct_dct_16;
+      load_buffer_16x16(input, in, stride, 0, 0, cfg->shift[0]);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case ADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_16;
+      load_buffer_16x16(input, in, stride, 0, 0, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case DCT_ADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_16;
+      load_buffer_16x16(input, in, stride, 0, 0, cfg->shift[0]);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case ADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_16;
+      load_buffer_16x16(input, in, stride, 0, 0, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+#if CONFIG_EXT_TX
+    case FLIPADST_DCT:
+      cfg = &fwd_txfm_2d_cfg_adst_dct_16;
+      load_buffer_16x16(input, in, stride, 1, 0, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case DCT_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_dct_adst_16;
+      load_buffer_16x16(input, in, stride, 0, 1, cfg->shift[0]);
+      fdct16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case FLIPADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_16;
+      load_buffer_16x16(input, in, stride, 1, 1, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case ADST_FLIPADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_16;
+      load_buffer_16x16(input, in, stride, 0, 1, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+    case FLIPADST_ADST:
+      cfg = &fwd_txfm_2d_cfg_adst_adst_16;
+      load_buffer_16x16(input, in, stride, 1, 0, cfg->shift[0]);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_col[0]);
+      col_txfm_16x16_rounding(out, -cfg->shift[1]);
+      transpose_16x16(out, in);
+      fadst16x16_sse4_1(in, out, cfg->cos_bit_row[0]);
+      transpose_16x16(out, in);
+      write_buffer_16x16(in, coeff);
+      break;
+#endif  // CONFIG_EXT_TX
+    default: assert(0);
+  }
+  (void)bd;
+}
diff --git a/av1/encoder/x86/quantize_sse2.c b/av1/encoder/x86/quantize_sse2.c
new file mode 100644
index 0000000..b8cd0c7
--- /dev/null
+++ b/av1/encoder/x86/quantize_sse2.c
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>
+#include <xmmintrin.h>
+
+#include "./vp10_rtcd.h"
+#include "aom/vpx_integer.h"
+
+void vp10_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+                           int skip_block, const int16_t *zbin_ptr,
+                           const int16_t *round_ptr, const int16_t *quant_ptr,
+                           const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+                           int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+                           uint16_t *eob_ptr, const int16_t *scan_ptr,
+                           const int16_t *iscan_ptr) {
+  __m128i zero;
+  __m128i thr;
+  int16_t nzflag;
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+
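+  // Point the buffers at their ends and negate n_coeffs so the loops index
+  // with negative offsets and stop when n_coeffs reaches zero.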
+  coeff_ptr += n_coeffs;
+  iscan_ptr += n_coeffs;
+  qcoeff_ptr += n_coeffs;
+  dqcoeff_ptr += n_coeffs;
+  n_coeffs = -n_coeffs;
+  zero = _mm_setzero_si128();
+
+  if (!skip_block) {
+    __m128i eob;
+    __m128i round, quant, dequant;
+    {
+      __m128i coeff0, coeff1;
+
+      // Set up the round/quant/dequant values shared across iterations
+      {
+        round = _mm_load_si128((const __m128i *)round_ptr);
+        quant = _mm_load_si128((const __m128i *)quant_ptr);
+        dequant = _mm_load_si128((const __m128i *)dequant_ptr);
+      }
+
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+        // Do DC and first 15 AC
+        coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs));
+        coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1);
+
+        // Poor man's sign extract
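+        // (srai by 15 yields 0 for non-negative lanes and -1 for negative
+        // ones; (x ^ s) - s then gives |x|)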
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+        round = _mm_unpackhi_epi64(round, round);
+        qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+        qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+        quant = _mm_unpackhi_epi64(quant, quant);
+        qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+        // Reinsert signs
+        qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+        _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+        coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+        dequant = _mm_unpackhi_epi64(dequant, dequant);
+        coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+        _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+      }
+
+      {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
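+        // (nzero_coeff is -1 in lanes with a nonzero coefficient, so the
+        // subtraction adds 1 only in those lanes)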
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob = _mm_max_epi16(eob, eob1);
+      }
+      n_coeffs += 8 * 2;
+    }
+
+    thr = _mm_srai_epi16(dequant, 1);
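+    // thr = dequant / 2: groups whose magnitudes all sit at or below this
+    // threshold are taken to quantize to zero, so their multiplies and
+    // stores of real values can be skipped.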
+
+    // AC only loop
+    while (n_coeffs < 0) {
+      __m128i coeff0, coeff1;
+      {
+        __m128i coeff0_sign, coeff1_sign;
+        __m128i qcoeff0, qcoeff1;
+        __m128i qtmp0, qtmp1;
+
+        coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs));
+        coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1);
+
+        // Poor man's sign extract
+        coeff0_sign = _mm_srai_epi16(coeff0, 15);
+        coeff1_sign = _mm_srai_epi16(coeff1, 15);
+        qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+        qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+        qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+        qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+        nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
+                 _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+
+        if (nzflag) {
+          qcoeff0 = _mm_adds_epi16(qcoeff0, round);
+          qcoeff1 = _mm_adds_epi16(qcoeff1, round);
+          qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
+          qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);
+
+          // Reinsert signs
+          qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+          qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+          qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+          qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+
+          coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
+          coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
+
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+        } else {
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+          _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
+
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+          _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+        }
+      }
+
+      if (nzflag) {
+        // Scan for eob
+        __m128i zero_coeff0, zero_coeff1;
+        __m128i nzero_coeff0, nzero_coeff1;
+        __m128i iscan0, iscan1;
+        __m128i eob0, eob1;
+        zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+        zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+        nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+        nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+        iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+        iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+        // Add one to convert from indices to counts
+        iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
+        iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
+        eob0 = _mm_and_si128(iscan0, nzero_coeff0);
+        eob1 = _mm_and_si128(iscan1, nzero_coeff1);
+        eob0 = _mm_max_epi16(eob0, eob1);
+        eob = _mm_max_epi16(eob, eob0);
+      }
+      n_coeffs += 8 * 2;
+    }
+
+    // Accumulate EOB
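+    // (horizontal max over the eight 16-bit lanes via successive shuffles)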
+    {
+      __m128i eob_shuffled;
+      eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+      eob = _mm_max_epi16(eob, eob_shuffled);
+      *eob_ptr = _mm_extract_epi16(eob, 1);
+    }
+  } else {
+    do {
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+      _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
+      n_coeffs += 8 * 2;
+    } while (n_coeffs < 0);
+    *eob_ptr = 0;
+  }
+}
diff --git a/av1/encoder/x86/quantize_ssse3_x86_64.asm b/av1/encoder/x86/quantize_ssse3_x86_64.asm
new file mode 100644
index 0000000..b8fefa2
--- /dev/null
+++ b/av1/encoder/x86/quantize_ssse3_x86_64.asm
@@ -0,0 +1,201 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%define private_prefix vp10
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_1: times 8 dw 1
+
+SECTION .text
+
+%macro QUANTIZE_FP 2
+cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
+                                shift, qcoeff, dqcoeff, dequant, \
+                                eob, scan, iscan
+  cmp                    dword skipm, 0
+  jne .blank
+
+  ; actual quantize loop - set up pointers, rounders, etc.
+  movifnidn                   coeffq, coeffmp
+  movifnidn                  ncoeffq, ncoeffmp
+  mov                             r2, dequantmp
+  movifnidn                    zbinq, zbinmp
+  movifnidn                   roundq, roundmp
+  movifnidn                   quantq, quantmp
+  mova                            m1, [roundq]             ; m1 = round
+  mova                            m2, [quantq]             ; m2 = quant
+%ifidn %1, fp_32x32
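+  ; the 32x32 variant halves the rounding term here, doubles the quantizer
+  ; below, and later halves the dequantized result, compensating for the
+  ; larger scale of the 32x32 transform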
+  pcmpeqw                         m5, m5
+  psrlw                           m5, 15
+  paddw                           m1, m5
+  psrlw                           m1, 1                    ; m1 = (m1 + 1) / 2
+%endif
+  mova                            m3, [r2q]                ; m3 = dequant
+  mov                             r3, qcoeffmp
+  mov                             r4, dqcoeffmp
+  mov                             r5, iscanmp
+%ifidn %1, fp_32x32
+  psllw                           m2, 1
+%endif
+  pxor                            m5, m5                   ; m5 = dedicated zero
+
+  lea                         coeffq, [  coeffq+ncoeffq*2]
+  lea                            r5q, [  r5q+ncoeffq*2]
+  lea                            r3q, [ r3q+ncoeffq*2]
+  lea                            r4q, [r4q+ncoeffq*2]
+  neg                        ncoeffq
+
+  ; get DC and first 15 AC coeffs
+  mova                            m9, [  coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova                           m10, [  coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw                           m6, m9                   ; m6 = abs(m9)
+  pabsw                          m11, m10                  ; m11 = abs(m10)
+  pcmpeqw                         m7, m7
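+  ; m7 = all ones (-1); subtracting it below turns scan indices into counts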
+
+  paddsw                          m6, m1                   ; m6 += round
+  punpckhqdq                      m1, m1
+  paddsw                         m11, m1                   ; m11 += round
+  pmulhw                          m8, m6, m2               ; m8 = m6*q>>16
+  punpckhqdq                      m2, m2
+  pmulhw                         m13, m11, m2              ; m13 = m11*q>>16
+  psignw                          m8, m9                   ; m8 = reinsert sign
+  psignw                         m13, m10                  ; m13 = reinsert sign
+  mova            [r3q+ncoeffq*2+ 0], m8
+  mova            [r3q+ncoeffq*2+16], m13
+%ifidn %1, fp_32x32
+  pabsw                           m8, m8
+  pabsw                          m13, m13
+%endif
+  pmullw                          m8, m3                   ; r4[i] = r3[i] * q
+  punpckhqdq                      m3, m3
+  pmullw                         m13, m3                   ; r4[i] = r3[i] * q
+%ifidn %1, fp_32x32
+  psrlw                           m8, 1
+  psrlw                          m13, 1
+  psignw                          m8, m9
+  psignw                         m13, m10
+  psrlw                           m0, m3, 2
+%else
+  psrlw                           m0, m3, 1
+%endif
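+  ; m0 = skip threshold for the AC loop: dequant/2, or dequant/4 for the
+  ; 32x32 variant (whose dequantized values are halved)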
+  mova            [r4q+ncoeffq*2+ 0], m8
+  mova            [r4q+ncoeffq*2+16], m13
+  pcmpeqw                         m8, m5                   ; m8 = c[i] == 0
+  pcmpeqw                        m13, m5                   ; m13 = c[i] == 0
+  mova                            m6, [  r5q+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova                           m11, [  r5q+ncoeffq*2+16] ; m11 = scan[i]
+  psubw                           m6, m7                   ; m6 = scan[i] + 1
+  psubw                          m11, m7                   ; m11 = scan[i] + 1
+  pandn                           m8, m6                   ; m8 = max(eob)
+  pandn                          m13, m11                  ; m13 = max(eob)
+  pmaxsw                          m8, m13
+  add                        ncoeffq, mmsize
+  jz .accumulate_eob
+
+.ac_only_loop:
+  mova                            m9, [  coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova                           m10, [  coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw                           m6, m9                   ; m6 = abs(m9)
+  pabsw                          m11, m10                  ; m11 = abs(m10)
+
+  pcmpgtw                         m7, m6,  m0
+  pcmpgtw                        m12, m11, m0
+  pmovmskb                       r6d, m7
+  pmovmskb                       r2d, m12
+
+  or                              r6, r2
+  jz .skip_iter
+
+  pcmpeqw                         m7, m7
+
+  paddsw                          m6, m1                   ; m6 += round
+  paddsw                         m11, m1                   ; m11 += round
+  pmulhw                         m14, m6, m2               ; m14 = m6*q>>16
+  pmulhw                         m13, m11, m2              ; m13 = m11*q>>16
+  psignw                         m14, m9                   ; m14 = reinsert sign
+  psignw                         m13, m10                  ; m13 = reinsert sign
+  mova            [r3q+ncoeffq*2+ 0], m14
+  mova            [r3q+ncoeffq*2+16], m13
+%ifidn %1, fp_32x32
+  pabsw                          m14, m14
+  pabsw                          m13, m13
+%endif
+  pmullw                         m14, m3                   ; r4[i] = r3[i] * q
+  pmullw                         m13, m3                   ; r4[i] = r3[i] * q
+%ifidn %1, fp_32x32
+  psrlw                          m14, 1
+  psrlw                          m13, 1
+  psignw                         m14, m9
+  psignw                         m13, m10
+%endif
+  mova            [r4q+ncoeffq*2+ 0], m14
+  mova            [r4q+ncoeffq*2+16], m13
+  pcmpeqw                        m14, m5                   ; m14 = c[i] == 0
+  pcmpeqw                        m13, m5                   ; m13 = c[i] == 0
+  mova                            m6, [  r5q+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova                           m11, [  r5q+ncoeffq*2+16] ; m11 = scan[i]
+  psubw                           m6, m7                   ; m6 = scan[i] + 1
+  psubw                          m11, m7                   ; m11 = scan[i] + 1
+  pandn                          m14, m6                   ; m14 = max(eob)
+  pandn                          m13, m11                  ; m13 = max(eob)
+  pmaxsw                          m8, m14
+  pmaxsw                          m8, m13
+  add                        ncoeffq, mmsize
+  jl .ac_only_loop
+
+  jmp .accumulate_eob
+.skip_iter:
+  mova            [r3q+ncoeffq*2+ 0], m5
+  mova            [r3q+ncoeffq*2+16], m5
+  mova            [r4q+ncoeffq*2+ 0], m5
+  mova            [r4q+ncoeffq*2+16], m5
+  add                        ncoeffq, mmsize
+  jl .ac_only_loop
+
+.accumulate_eob:
+  ; horizontally accumulate/max eobs and write into [eob] memory pointer
+  mov                             r2, eobmp
+  pshufd                          m7, m8, 0xe
+  pmaxsw                          m8, m7
+  pshuflw                         m7, m8, 0xe
+  pmaxsw                          m8, m7
+  pshuflw                         m7, m8, 0x1
+  pmaxsw                          m8, m7
+  pextrw                          r6, m8, 0
+  mov                           [r2], r6
+  RET
+
+  ; skip-block, i.e. just write all zeroes
+.blank:
+  mov                             r0, dqcoeffmp
+  movifnidn                  ncoeffq, ncoeffmp
+  mov                             r2, qcoeffmp
+  mov                             r3, eobmp
+
+  lea                            r0q, [r0q+ncoeffq*2]
+  lea                            r2q, [r2q+ncoeffq*2]
+  neg                        ncoeffq
+  pxor                            m7, m7
+.blank_loop:
+  mova            [r0q+ncoeffq*2+ 0], m7
+  mova            [r0q+ncoeffq*2+16], m7
+  mova            [r2q+ncoeffq*2+ 0], m7
+  mova            [r2q+ncoeffq*2+16], m7
+  add                        ncoeffq, mmsize
+  jl .blank_loop
+  mov                     word [r3q], 0
+  RET
+%endmacro
+
+INIT_XMM ssse3
+QUANTIZE_FP fp, 7
+QUANTIZE_FP fp_32x32, 7
diff --git a/av1/encoder/x86/ssim_opt_x86_64.asm b/av1/encoder/x86/ssim_opt_x86_64.asm
new file mode 100644
index 0000000..29659ee
--- /dev/null
+++ b/av1/encoder/x86/ssim_opt_x86_64.asm
@@ -0,0 +1,216 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "aom_ports/x86_abi_support.asm"
+
+; tabulate_ssim - sums sum_s,sum_r,sum_sq_s,sum_sq_r, sum_sxr
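+; each invocation accumulates one 8-pixel (word) slice of the window into
+; the running sums kept in xmm11-xmm15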
+%macro TABULATE_SSIM 0
+        paddusw         xmm15, xmm3  ; sum_s
+        paddusw         xmm14, xmm4  ; sum_r
+        movdqa          xmm1, xmm3
+        pmaddwd         xmm1, xmm1
+        paddd           xmm13, xmm1 ; sum_sq_s
+        movdqa          xmm2, xmm4
+        pmaddwd         xmm2, xmm2
+        paddd           xmm12, xmm2 ; sum_sq_r
+        pmaddwd         xmm3, xmm4
+        paddd           xmm11, xmm3  ; sum_sxr
+%endmacro
+
+; Sum across the register %1 starting with q words
+%macro SUM_ACROSS_Q 1
+        movdqa          xmm2,%1
+        punpckldq       %1,xmm0
+        punpckhdq       xmm2,xmm0
+        paddq           %1,xmm2
+        movdqa          xmm2,%1
+        punpcklqdq      %1,xmm0
+        punpckhqdq      xmm2,xmm0
+        paddq           %1,xmm2
+%endmacro
+
+; Sum across the register %1 starting with word-sized values
+%macro SUM_ACROSS_W 1
+        movdqa          xmm1, %1
+        punpcklwd       %1,xmm0
+        punpckhwd       xmm1,xmm0
+        paddd           %1, xmm1
+        SUM_ACROSS_Q    %1
+%endmacro
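For readers more comfortable in C, a minimal intrinsics sketch of the SUM_ACROSS_Q reduction under the same zero-extension scheme (the helper name is hypothetical): the four dwords are widened to qwords against a zero register, then the two qwords are folded into lane 0.

#include <emmintrin.h>

/* Hypothetical intrinsics model of SUM_ACROSS_Q: fold four 32-bit lanes into
 * one 64-bit sum in lane 0 (lane 1 ends up zero, as in the macro). */
static __m128i sum_across_q_model(__m128i v) {
  const __m128i zero = _mm_setzero_si128();
  __m128i hi;
  /* dword step: zero-extend the four dwords into qword lanes and add */
  hi = _mm_unpackhi_epi32(v, zero);
  v = _mm_unpacklo_epi32(v, zero);
  v = _mm_add_epi64(v, hi);
  /* qword step: fold the upper qword lane into the lower one */
  hi = _mm_unpackhi_epi64(v, zero);
  v = _mm_unpacklo_epi64(v, zero);
  v = _mm_add_epi64(v, hi);
  return v;
}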
+;void vp10_ssim_parms_16x16_sse2(
+;    unsigned char *s,
+;    int sp,
+;    unsigned char *r,
+;    int rp,
+;    unsigned long *sum_s,
+;    unsigned long *sum_r,
+;    unsigned long *sum_sq_s,
+;    unsigned long *sum_sq_r,
+;    unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure. The pxors probably aren't needed
+; (the calling app will initialize to 0), everything could easily fit in sse2
+; without too much hassle, and better estimates could probably be had with
+; psadbw or pavgb. At this point this is just meant to be a first pass for
+; calculating all the parms needed for 16x16 ssim so we can play with dssim
+; as distortion in the mode selection code.
+global sym(vp10_ssim_parms_16x16_sse2) PRIVATE
+sym(vp10_ssim_parms_16x16_sse2):
+    push        rbp
+    mov         rbp, rsp
+    SHADOW_ARGS_TO_STACK 9
+    SAVE_XMM 15
+    push        rsi
+    push        rdi
+    ; end prolog
+
+    mov             rsi,        arg(0) ;s
+    mov             rcx,        arg(1) ;sp
+    mov             rdi,        arg(2) ;r
+    mov             rax,        arg(3) ;rp
+
+    pxor            xmm0, xmm0
+    pxor            xmm15,xmm15  ;sum_s
+    pxor            xmm14,xmm14  ;sum_r
+    pxor            xmm13,xmm13  ;sum_sq_s
+    pxor            xmm12,xmm12  ;sum_sq_r
+    pxor            xmm11,xmm11  ;sum_sxr
+
+    mov             rdx, 16      ;row counter
+.NextRow:
+
+    ;grab source and reference pixels
+    movdqu          xmm5, [rsi]
+    movdqu          xmm6, [rdi]
+    movdqa          xmm3, xmm5
+    movdqa          xmm4, xmm6
+    punpckhbw       xmm3, xmm0 ; high_s
+    punpckhbw       xmm4, xmm0 ; high_r
+
+    TABULATE_SSIM
+
+    movdqa          xmm3, xmm5
+    movdqa          xmm4, xmm6
+    punpcklbw       xmm3, xmm0 ; low_s
+    punpcklbw       xmm4, xmm0 ; low_r
+
+    TABULATE_SSIM
+
+    add             rsi, rcx   ; next s row
+    add             rdi, rax   ; next r row
+
+    dec             rdx        ; counter
+    jnz .NextRow
+
+    SUM_ACROSS_W    xmm15
+    SUM_ACROSS_W    xmm14
+    SUM_ACROSS_Q    xmm13
+    SUM_ACROSS_Q    xmm12
+    SUM_ACROSS_Q    xmm11
+
+    mov             rdi, arg(4)
+    movd            [rdi], xmm15
+    mov             rdi, arg(5)
+    movd            [rdi], xmm14
+    mov             rdi, arg(6)
+    movd            [rdi], xmm13
+    mov             rdi, arg(7)
+    movd            [rdi], xmm12
+    mov             rdi, arg(8)
+    movd            [rdi], xmm11
+
+    ; begin epilog
+    pop         rdi
+    pop         rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop         rbp
+    ret
+
+;void vp10_ssim_parms_8x8_sse2(
+;    unsigned char *s,
+;    int sp,
+;    unsigned char *r,
+;    int rp,
+;    unsigned long *sum_s,
+;    unsigned long *sum_r,
+;    unsigned long *sum_sq_s,
+;    unsigned long *sum_sq_r,
+;    unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure. The pxors probably aren't needed
+; (the calling app will initialize to 0), everything could easily fit in sse2
+; without too much hassle, and better estimates could probably be had with
+; psadbw or pavgb. At this point this is just meant to be a first pass for
+; calculating all the parms needed for 16x16 ssim so we can play with dssim
+; as distortion in the mode selection code.
+global sym(vp10_ssim_parms_8x8_sse2) PRIVATE
+sym(vp10_ssim_parms_8x8_sse2):
+    push        rbp
+    mov         rbp, rsp
+    SHADOW_ARGS_TO_STACK 9
+    SAVE_XMM 15
+    push        rsi
+    push        rdi
+    ; end prolog
+
+    mov             rsi,        arg(0) ;s
+    mov             rcx,        arg(1) ;sp
+    mov             rdi,        arg(2) ;r
+    mov             rax,        arg(3) ;rp
+
+    pxor            xmm0, xmm0
+    pxor            xmm15,xmm15  ;sum_s
+    pxor            xmm14,xmm14  ;sum_r
+    pxor            xmm13,xmm13  ;sum_sq_s
+    pxor            xmm12,xmm12  ;sum_sq_r
+    pxor            xmm11,xmm11  ;sum_sxr
+
+    mov             rdx, 8      ;row counter
+.NextRow:
+
+    ;grab source and reference pixels
+    movq            xmm3, [rsi]
+    movq            xmm4, [rdi]
+    punpcklbw       xmm3, xmm0 ; low_s
+    punpcklbw       xmm4, xmm0 ; low_r
+
+    TABULATE_SSIM
+
+    add             rsi, rcx   ; next s row
+    add             rdi, rax   ; next r row
+
+    dec             rdx        ; counter
+    jnz .NextRow
+
+    SUM_ACROSS_W    xmm15
+    SUM_ACROSS_W    xmm14
+    SUM_ACROSS_Q    xmm13
+    SUM_ACROSS_Q    xmm12
+    SUM_ACROSS_Q    xmm11
+
+    mov             rdi, arg(4)
+    movd            [rdi], xmm15
+    mov             rdi, arg(5)
+    movd            [rdi], xmm14
+    mov             rdi, arg(6)
+    movd            [rdi], xmm13
+    mov             rdi, arg(7)
+    movd            [rdi], xmm12
+    mov             rdi, arg(8)
+    movd            [rdi], xmm11
+
+    ; begin epilog
+    pop         rdi
+    pop         rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop         rbp
+    ret
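In scalar terms, both routines above accumulate the same five sums over a square block; a hedged C model follows (uint32_t stands in for the `unsigned long` of the prototype comments, and the name is illustrative).

#include <stdint.h>

/* Hypothetical scalar model of the ssim_parms routines: over a width x height
 * block of source s (stride sp) and reference r (stride rp), accumulate the
 * five sums SSIM needs. The asm above hard-codes 16x16 and 8x8 and returns
 * each sum through its own out-pointer. */
static void ssim_parms_model(const uint8_t *s, int sp, const uint8_t *r,
                             int rp, int width, int height, uint32_t *sum_s,
                             uint32_t *sum_r, uint32_t *sum_sq_s,
                             uint32_t *sum_sq_r, uint32_t *sum_sxr) {
  int i, j;
  *sum_s = *sum_r = *sum_sq_s = *sum_sq_r = *sum_sxr = 0;
  for (i = 0; i < height; i++, s += sp, r += rp) {
    for (j = 0; j < width; j++) {
      *sum_s += s[j];
      *sum_r += r[j];
      *sum_sq_s += s[j] * s[j];
      *sum_sq_r += r[j] * r[j];
      *sum_sxr += s[j] * r[j];
    }
  }
}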
diff --git a/av1/encoder/x86/temporal_filter_apply_sse2.asm b/av1/encoder/x86/temporal_filter_apply_sse2.asm
new file mode 100644
index 0000000..eabe575
--- /dev/null
+++ b/av1/encoder/x86/temporal_filter_apply_sse2.asm
@@ -0,0 +1,212 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "aom_ports/x86_abi_support.asm"
+
+; void vp10_temporal_filter_apply_sse2 | arg
+;  (unsigned char  *frame1,           |  0
+;   unsigned int    stride,           |  1
+;   unsigned char  *frame2,           |  2
+;   unsigned int    block_width,      |  3
+;   unsigned int    block_height,     |  4
+;   int             strength,         |  5
+;   int             filter_weight,    |  6
+;   unsigned int   *accumulator,      |  7
+;   unsigned short *count)            |  8
+global sym(vp10_temporal_filter_apply_sse2) PRIVATE
+sym(vp10_temporal_filter_apply_sse2):
+
+    push        rbp
+    mov         rbp, rsp
+    SHADOW_ARGS_TO_STACK 9
+    SAVE_XMM 7
+    GET_GOT     rbx
+    push        rsi
+    push        rdi
+    ALIGN_STACK 16, rax
+    %define block_width    0
+    %define block_height  16
+    %define strength      32
+    %define filter_weight 48
+    %define rounding_bit  64
+    %define rbp_backup    80
+    %define stack_size    96
+    sub         rsp,           stack_size
+    mov         [rsp + rbp_backup], rbp
+    ; end prolog
+
+        mov         edx,            arg(3)
+        mov         [rsp + block_width], rdx
+        mov         edx,            arg(4)
+        mov         [rsp + block_height], rdx
+        movd        xmm6,           arg(5)
+        movdqa      [rsp + strength], xmm6 ; where strength is used, all 16 bytes are read
+
+        ; calculate the rounding bit outside the loop
+        ; 0x8000 >> (16 - strength)
+        mov         rdx,            16
+        sub         rdx,            arg(5) ; 16 - strength
+        movq        xmm4,           rdx    ; psrlw takes its count from an xmm reg or immediate, not a GPR
+        movdqa      xmm5,           [GLOBAL(_const_top_bit)]
+        psrlw       xmm5,           xmm4
+        movdqa      [rsp + rounding_bit], xmm5
+
+        mov         rsi,            arg(0) ; src/frame1
+        mov         rdx,            arg(2) ; predictor frame
+        mov         rdi,            arg(7) ; accumulator
+        mov         rax,            arg(8) ; count
+
+        ; dup the filter weight and store for later
+        movd        xmm0,           arg(6) ; filter_weight
+        pshuflw     xmm0,           xmm0, 0
+        punpcklwd   xmm0,           xmm0
+        movdqa      [rsp + filter_weight], xmm0
+
+        mov         rbp,            arg(1) ; stride
+        pxor        xmm7,           xmm7   ; zero for extraction
+
+        mov         rcx,            [rsp + block_width]
+        imul        rcx,            [rsp + block_height]
+        add         rcx,            rdx
+        cmp         dword ptr [rsp + block_width], 8
+        jne         .temporal_filter_apply_load_16
+
+.temporal_filter_apply_load_8:
+        movq        xmm0,           [rsi]  ; first row
+        lea         rsi,            [rsi + rbp] ; += stride
+        punpcklbw   xmm0,           xmm7   ; src[ 0- 7]
+        movq        xmm1,           [rsi]  ; second row
+        lea         rsi,            [rsi + rbp] ; += stride
+        punpcklbw   xmm1,           xmm7   ; src[ 8-15]
+        jmp         .temporal_filter_apply_load_finished
+
+.temporal_filter_apply_load_16:
+        movdqa      xmm0,           [rsi]  ; src (frame1)
+        lea         rsi,            [rsi + rbp] ; += stride
+        movdqa      xmm1,           xmm0
+        punpcklbw   xmm0,           xmm7   ; src[ 0- 7]
+        punpckhbw   xmm1,           xmm7   ; src[ 8-15]
+
+.temporal_filter_apply_load_finished:
+        movdqa      xmm2,           [rdx]  ; predictor (frame2)
+        movdqa      xmm3,           xmm2
+        punpcklbw   xmm2,           xmm7   ; pred[ 0- 7]
+        punpckhbw   xmm3,           xmm7   ; pred[ 8-15]
+
+        ; modifier = src_byte - pixel_value
+        psubw       xmm0,           xmm2   ; src - pred[ 0- 7]
+        psubw       xmm1,           xmm3   ; src - pred[ 8-15]
+
+        ; modifier *= modifier
+        pmullw      xmm0,           xmm0   ; modifier[ 0- 7]^2
+        pmullw      xmm1,           xmm1   ; modifier[ 8-15]^2
+
+        ; modifier *= 3
+        pmullw      xmm0,           [GLOBAL(_const_3w)]
+        pmullw      xmm1,           [GLOBAL(_const_3w)]
+
+        ; modifier += 0x8000 >> (16 - strength)
+        paddw       xmm0,           [rsp + rounding_bit]
+        paddw       xmm1,           [rsp + rounding_bit]
+
+        ; modifier >>= strength
+        psrlw       xmm0,           [rsp + strength]
+        psrlw       xmm1,           [rsp + strength]
+
+        ; modifier = 16 - modifier
+        ; saturation takes care of modifier > 16
+        movdqa      xmm3,           [GLOBAL(_const_16w)]
+        movdqa      xmm2,           [GLOBAL(_const_16w)]
+        psubusw     xmm3,           xmm1
+        psubusw     xmm2,           xmm0
+
+        ; modifier *= filter_weight
+        pmullw      xmm2,           [rsp + filter_weight]
+        pmullw      xmm3,           [rsp + filter_weight]
+
+        ; count
+        movdqa      xmm4,           [rax]
+        movdqa      xmm5,           [rax+16]
+        ; += modifier
+        paddw       xmm4,           xmm2
+        paddw       xmm5,           xmm3
+        ; write back
+        movdqa      [rax],          xmm4
+        movdqa      [rax+16],       xmm5
+        lea         rax,            [rax + 16*2] ; count += 16*(sizeof(short))
+
+        ; load and extract the predictor up to shorts
+        pxor        xmm7,           xmm7
+        movdqa      xmm0,           [rdx]
+        lea         rdx,            [rdx + 16*1] ; pred += 16*(sizeof(char))
+        movdqa      xmm1,           xmm0
+        punpcklbw   xmm0,           xmm7   ; pred[ 0- 7]
+        punpckhbw   xmm1,           xmm7   ; pred[ 8-15]
+
+        ; modifier *= pixel_value
+        pmullw      xmm0,           xmm2
+        pmullw      xmm1,           xmm3
+
+        ; expand to double words
+        movdqa      xmm2,           xmm0
+        punpcklwd   xmm0,           xmm7   ; [ 0- 3]
+        punpckhwd   xmm2,           xmm7   ; [ 4- 7]
+        movdqa      xmm3,           xmm1
+        punpcklwd   xmm1,           xmm7   ; [ 8-11]
+        punpckhwd   xmm3,           xmm7   ; [12-15]
+
+        ; accumulator
+        movdqa      xmm4,           [rdi]
+        movdqa      xmm5,           [rdi+16]
+        movdqa      xmm6,           [rdi+32]
+        movdqa      xmm7,           [rdi+48]
+        ; += modifier
+        paddd       xmm4,           xmm0
+        paddd       xmm5,           xmm2
+        paddd       xmm6,           xmm1
+        paddd       xmm7,           xmm3
+        ; write back
+        movdqa      [rdi],          xmm4
+        movdqa      [rdi+16],       xmm5
+        movdqa      [rdi+32],       xmm6
+        movdqa      [rdi+48],       xmm7
+        lea         rdi,            [rdi + 16*4] ; accumulator += 16*(sizeof(int))
+
+        cmp         rdx,            rcx
+        je          .temporal_filter_apply_epilog
+        pxor        xmm7,           xmm7   ; zero for extraction
+        cmp         dword ptr [rsp + block_width], 16
+        je          .temporal_filter_apply_load_16
+        jmp         .temporal_filter_apply_load_8
+
+.temporal_filter_apply_epilog:
+    ; begin epilog
+    mov         rbp,            [rsp + rbp_backup]
+    add         rsp,            stack_size
+    pop         rsp
+    pop         rdi
+    pop         rsi
+    RESTORE_GOT
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop         rbp
+    ret
+
+SECTION_RODATA
+align 16
+_const_3w:
+    times 8 dw 3
+align 16
+_const_top_bit:
+    times 8 dw 1<<15
+align 16
+_const_16w:
+    times 8 dw 16
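A hedged scalar model of the per-pixel weighting above, mirroring the commented steps (squared difference, times 3, rounded shift by strength, inverted against 16 with saturation, scaled by filter_weight) rather than any exact C reference; the name is illustrative.

#include <stdint.h>

/* Hypothetical scalar model of the temporal filter above: each predictor
 * pixel gets a weight that falls off with its squared distance from the
 * source pixel, then is blended into the running count/accumulator planes. */
static void temporal_filter_apply_model(
    const uint8_t *frame1, unsigned int stride, const uint8_t *frame2,
    unsigned int block_width, unsigned int block_height, int strength,
    int filter_weight, unsigned int *accumulator, uint16_t *count) {
  /* 0x8000 >> (16 - strength), i.e. half of 1 << strength */
  const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
  unsigned int i, j, k = 0;
  for (i = 0; i < block_height; i++) {
    for (j = 0; j < block_width; j++, k++) {
      const int pixel = frame2[k];
      const int diff = frame1[i * stride + j] - pixel;
      int mod = (diff * diff * 3 + rounding) >> strength;
      if (mod > 16) mod = 16; /* the asm gets this from saturating psubusw */
      mod = (16 - mod) * filter_weight;
      count[k] += (uint16_t)mod;
      accumulator[k] += (unsigned int)(mod * pixel);
    }
  }
}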
diff --git a/av1/encoder/x86/vp10_highbd_quantize_sse4.c b/av1/encoder/x86/vp10_highbd_quantize_sse4.c
new file mode 100644
index 0000000..8b05c6a
--- /dev/null
+++ b/av1/encoder/x86/vp10_highbd_quantize_sse4.c
@@ -0,0 +1,192 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <smmintrin.h>
+#include <stdint.h>
+
+#include "./vp10_rtcd.h"
+#include "aom_dsp/vpx_dsp_common.h"
+
+// Coefficient quantization phase 1
+// param[0-2] : rounding/quan/dequan constants
+static INLINE void quantize_coeff_phase1(__m128i *coeff, const __m128i *param,
+                                         const int shift, const int scale,
+                                         __m128i *qcoeff, __m128i *dquan,
+                                         __m128i *sign) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i one = _mm_set1_epi32(1);
+
+  *sign = _mm_cmplt_epi32(*coeff, zero);
+  *sign = _mm_or_si128(*sign, one);
+  *coeff = _mm_abs_epi32(*coeff);
+
+  qcoeff[0] = _mm_add_epi32(*coeff, param[0]);
+  qcoeff[1] = _mm_unpackhi_epi32(qcoeff[0], zero);
+  qcoeff[0] = _mm_unpacklo_epi32(qcoeff[0], zero);
+
+  qcoeff[0] = _mm_mul_epi32(qcoeff[0], param[1]);
+  qcoeff[0] = _mm_srli_epi64(qcoeff[0], shift);
+  dquan[0] = _mm_mul_epi32(qcoeff[0], param[2]);
+  dquan[0] = _mm_srli_epi64(dquan[0], scale);
+}
+
+// Coefficient quantization phase 2
+static INLINE void quantize_coeff_phase2(__m128i *qcoeff, __m128i *dquan,
+                                         const __m128i *sign,
+                                         const __m128i *param, const int shift,
+                                         const int scale, tran_low_t *qAddr,
+                                         tran_low_t *dqAddr) {
+  __m128i mask0L = _mm_set_epi32(-1, -1, 0, 0);
+  __m128i mask0H = _mm_set_epi32(0, 0, -1, -1);
+
+  qcoeff[1] = _mm_mul_epi32(qcoeff[1], param[1]);
+  qcoeff[1] = _mm_srli_epi64(qcoeff[1], shift);
+  dquan[1] = _mm_mul_epi32(qcoeff[1], param[2]);
+  dquan[1] = _mm_srli_epi64(dquan[1], scale);
+
+  // combine L&H
+  qcoeff[0] = _mm_shuffle_epi32(qcoeff[0], 0xd8);
+  qcoeff[1] = _mm_shuffle_epi32(qcoeff[1], 0x8d);
+
+  qcoeff[0] = _mm_and_si128(qcoeff[0], mask0H);
+  qcoeff[1] = _mm_and_si128(qcoeff[1], mask0L);
+
+  dquan[0] = _mm_shuffle_epi32(dquan[0], 0xd8);
+  dquan[1] = _mm_shuffle_epi32(dquan[1], 0x8d);
+
+  dquan[0] = _mm_and_si128(dquan[0], mask0H);
+  dquan[1] = _mm_and_si128(dquan[1], mask0L);
+
+  qcoeff[0] = _mm_or_si128(qcoeff[0], qcoeff[1]);
+  dquan[0] = _mm_or_si128(dquan[0], dquan[1]);
+
+  qcoeff[0] = _mm_sign_epi32(qcoeff[0], *sign);
+  dquan[0] = _mm_sign_epi32(dquan[0], *sign);
+
+  _mm_storeu_si128((__m128i *)qAddr, qcoeff[0]);
+  _mm_storeu_si128((__m128i *)dqAddr, dquan[0]);
+}
+
+static INLINE void find_eob(tran_low_t *qcoeff_ptr, const int16_t *iscan,
+                            __m128i *eob) {
+  const __m128i zero = _mm_setzero_si128();
+  __m128i mask, iscanIdx;
+  const __m128i q0 = _mm_loadu_si128((__m128i const *)qcoeff_ptr);
+  const __m128i q1 = _mm_loadu_si128((__m128i const *)(qcoeff_ptr + 4));
+  __m128i nz_flag0 = _mm_cmpeq_epi32(q0, zero);
+  __m128i nz_flag1 = _mm_cmpeq_epi32(q1, zero);
+
+  nz_flag0 = _mm_cmpeq_epi32(nz_flag0, zero);
+  nz_flag1 = _mm_cmpeq_epi32(nz_flag1, zero);
+
+  mask = _mm_packs_epi32(nz_flag0, nz_flag1);
+  iscanIdx = _mm_loadu_si128((__m128i const *)iscan);
+  iscanIdx = _mm_sub_epi16(iscanIdx, mask);
+  iscanIdx = _mm_and_si128(iscanIdx, mask);
+  *eob = _mm_max_epi16(*eob, iscanIdx);
+}
+
+static INLINE uint16_t get_accumulated_eob(__m128i *eob) {
+  __m128i eob_shuffled;
+  uint16_t eobValue;
+  eob_shuffled = _mm_shuffle_epi32(*eob, 0xe);
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(*eob, 0xe);
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(*eob, 0x1);
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eobValue = _mm_extract_epi16(*eob, 0);
+  return eobValue;
+}
+
+void vp10_highbd_quantize_fp_sse4_1(
+    const tran_low_t *coeff_ptr, intptr_t count, int skip_block,
+    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, int log_scale) {
+  __m128i coeff[2], qcoeff[2], dequant[2], qparam[3], coeff_sign;
+  __m128i eob = _mm_setzero_si128();
+  const tran_low_t *src = coeff_ptr;
+  tran_low_t *quanAddr = qcoeff_ptr;
+  tran_low_t *dquanAddr = dqcoeff_ptr;
+  const int shift = 16 - log_scale;
+  const int coeff_stride = 4;
+  const int quan_stride = coeff_stride;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+  (void)scan;
+
+  memset(quanAddr, 0, count * sizeof(quanAddr[0]));
+  memset(dquanAddr, 0, count * sizeof(dquanAddr[0]));
+
+  if (!skip_block) {
+    coeff[0] = _mm_loadu_si128((__m128i const *)src);
+
+    qparam[0] =
+        _mm_set_epi32(round_ptr[1], round_ptr[1], round_ptr[1], round_ptr[0]);
+    qparam[1] = _mm_set_epi64x(quant_ptr[1], quant_ptr[0]);
+    qparam[2] = _mm_set_epi64x(dequant_ptr[1], dequant_ptr[0]);
+
+    // DC and first 3 AC
+    quantize_coeff_phase1(&coeff[0], qparam, shift, log_scale, qcoeff, dequant,
+                          &coeff_sign);
+
+    // update round/quan/dquan for AC
+    qparam[0] = _mm_unpackhi_epi64(qparam[0], qparam[0]);
+    qparam[1] = _mm_set_epi64x(quant_ptr[1], quant_ptr[1]);
+    qparam[2] = _mm_set_epi64x(dequant_ptr[1], dequant_ptr[1]);
+
+    quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                          log_scale, quanAddr, dquanAddr);
+
+    // next 4 AC
+    coeff[1] = _mm_loadu_si128((__m128i const *)(src + coeff_stride));
+    quantize_coeff_phase1(&coeff[1], qparam, shift, log_scale, qcoeff, dequant,
+                          &coeff_sign);
+    quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                          log_scale, quanAddr + quan_stride,
+                          dquanAddr + quan_stride);
+
+    find_eob(quanAddr, iscan, &eob);
+
+    count -= 8;
+
+    // loop for the rest of AC
+    while (count > 0) {
+      src += coeff_stride << 1;
+      quanAddr += quan_stride << 1;
+      dquanAddr += quan_stride << 1;
+      iscan += quan_stride << 1;
+
+      coeff[0] = _mm_loadu_si128((__m128i const *)src);
+      coeff[1] = _mm_loadu_si128((__m128i const *)(src + coeff_stride));
+
+      quantize_coeff_phase1(&coeff[0], qparam, shift, log_scale, qcoeff,
+                            dequant, &coeff_sign);
+      quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                            log_scale, quanAddr, dquanAddr);
+
+      quantize_coeff_phase1(&coeff[1], qparam, shift, log_scale, qcoeff,
+                            dequant, &coeff_sign);
+      quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                            log_scale, quanAddr + quan_stride,
+                            dquanAddr + quan_stride);
+
+      find_eob(quanAddr, iscan, &eob);
+
+      count -= 8;
+    }
+    *eob_ptr = get_accumulated_eob(&eob);
+  } else {
+    *eob_ptr = 0;
+  }
+}
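One detail of the file above worth spelling out is the sign round-trip in quantize_coeff_phase1/phase2: _mm_sign_epi32(x, s) negates x where s < 0, passes x where s > 0, and zeroes x where s == 0, which is why phase 1 ORs the comparison result with 1 (turning -1/0 into -1/+1 so positive lanes survive). A scalar model of one coefficient, with hypothetical names:

#include <stdint.h>

/* Hypothetical one-coefficient model of the SSE4.1 path above:
 * shift is (16 - log_scale), matching the C file. */
static int32_t highbd_quantize_one_model(int32_t coeff, int32_t round,
                                         int32_t quant, int shift) {
  const int32_t sign = coeff < 0 ? -1 : 1; /* cmplt | 1 */
  const int64_t abs_c = coeff < 0 ? -(int64_t)coeff : coeff;
  const int32_t q = (int32_t)(((abs_c + round) * quant) >> shift);
  return q * sign; /* _mm_sign_epi32 */
}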
diff --git a/av1/encoder/x86/wedge_utils_sse2.c b/av1/encoder/x86/wedge_utils_sse2.c
new file mode 100644
index 0000000..a6be947
--- /dev/null
+++ b/av1/encoder/x86/wedge_utils_sse2.c
@@ -0,0 +1,253 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+
+#include "aom_dsp/x86/synonyms.h"
+
+#include "aom/vpx_integer.h"
+
+#include "av1/common/reconinter.h"
+
+#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
+
+/**
+ * See vp10_wedge_sse_from_residuals_c
+ */
+uint64_t vp10_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
+                                            const uint8_t *m, int N) {
+  int n = -N;
+  int n8 = n + 8;
+
+  uint64_t csse;
+
+  const __m128i v_mask_max_w = _mm_set1_epi16(MAX_MASK_VALUE);
+  const __m128i v_zext_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
+
+  __m128i v_acc0_q = _mm_setzero_si128();
+
+  assert(N % 64 == 0);
+
+  r1 += N;
+  d += N;
+  m += N;
+
+  do {
+    const __m128i v_r0_w = xx_load_128(r1 + n);
+    const __m128i v_r1_w = xx_load_128(r1 + n8);
+    const __m128i v_d0_w = xx_load_128(d + n);
+    const __m128i v_d1_w = xx_load_128(d + n8);
+    const __m128i v_m01_b = xx_load_128(m + n);
+
+    const __m128i v_rd0l_w = _mm_unpacklo_epi16(v_d0_w, v_r0_w);
+    const __m128i v_rd0h_w = _mm_unpackhi_epi16(v_d0_w, v_r0_w);
+    const __m128i v_rd1l_w = _mm_unpacklo_epi16(v_d1_w, v_r1_w);
+    const __m128i v_rd1h_w = _mm_unpackhi_epi16(v_d1_w, v_r1_w);
+    const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
+
+    const __m128i v_m0l_w = _mm_unpacklo_epi16(v_m0_w, v_mask_max_w);
+    const __m128i v_m0h_w = _mm_unpackhi_epi16(v_m0_w, v_mask_max_w);
+    const __m128i v_m1l_w = _mm_unpacklo_epi16(v_m1_w, v_mask_max_w);
+    const __m128i v_m1h_w = _mm_unpackhi_epi16(v_m1_w, v_mask_max_w);
+
+    const __m128i v_t0l_d = _mm_madd_epi16(v_rd0l_w, v_m0l_w);
+    const __m128i v_t0h_d = _mm_madd_epi16(v_rd0h_w, v_m0h_w);
+    const __m128i v_t1l_d = _mm_madd_epi16(v_rd1l_w, v_m1l_w);
+    const __m128i v_t1h_d = _mm_madd_epi16(v_rd1h_w, v_m1h_w);
+
+    const __m128i v_t0_w = _mm_packs_epi32(v_t0l_d, v_t0h_d);
+    const __m128i v_t1_w = _mm_packs_epi32(v_t1l_d, v_t1h_d);
+
+    const __m128i v_sq0_d = _mm_madd_epi16(v_t0_w, v_t0_w);
+    const __m128i v_sq1_d = _mm_madd_epi16(v_t1_w, v_t1_w);
+
+    const __m128i v_sum0_q = _mm_add_epi64(_mm_and_si128(v_sq0_d, v_zext_q),
+                                           _mm_srli_epi64(v_sq0_d, 32));
+    const __m128i v_sum1_q = _mm_add_epi64(_mm_and_si128(v_sq1_d, v_zext_q),
+                                           _mm_srli_epi64(v_sq1_d, 32));
+
+    v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum0_q);
+    v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum1_q);
+
+    n8 += 16;
+    n += 16;
+  } while (n);
+
+  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));
+
+#if ARCH_X86_64
+  csse = (uint64_t)_mm_cvtsi128_si64(v_acc0_q);
+#else
+  xx_storel_64(&csse, v_acc0_q);
+#endif
+
+  return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+/**
+ * See vp10_wedge_sign_from_residuals_c
+ */
+int vp10_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
+                                        int N, int64_t limit) {
+  int64_t acc;
+
+  __m128i v_sign_d;
+  __m128i v_acc0_d = _mm_setzero_si128();
+  __m128i v_acc1_d = _mm_setzero_si128();
+  __m128i v_acc_q;
+
+  // Input size is limited to 8192 by the use of 32 bit accumulators and m
+  // lying in [0, 64]. Overflow might happen at larger sizes, though it is
+  // practically impossible on real video input.
+  assert(N < 8192);
+  assert(N % 64 == 0);
+
+  do {
+    const __m128i v_m01_b = xx_load_128(m);
+    const __m128i v_m23_b = xx_load_128(m + 16);
+    const __m128i v_m45_b = xx_load_128(m + 32);
+    const __m128i v_m67_b = xx_load_128(m + 48);
+
+    const __m128i v_d0_w = xx_load_128(ds);
+    const __m128i v_d1_w = xx_load_128(ds + 8);
+    const __m128i v_d2_w = xx_load_128(ds + 16);
+    const __m128i v_d3_w = xx_load_128(ds + 24);
+    const __m128i v_d4_w = xx_load_128(ds + 32);
+    const __m128i v_d5_w = xx_load_128(ds + 40);
+    const __m128i v_d6_w = xx_load_128(ds + 48);
+    const __m128i v_d7_w = xx_load_128(ds + 56);
+
+    const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m2_w = _mm_unpacklo_epi8(v_m23_b, _mm_setzero_si128());
+    const __m128i v_m3_w = _mm_unpackhi_epi8(v_m23_b, _mm_setzero_si128());
+    const __m128i v_m4_w = _mm_unpacklo_epi8(v_m45_b, _mm_setzero_si128());
+    const __m128i v_m5_w = _mm_unpackhi_epi8(v_m45_b, _mm_setzero_si128());
+    const __m128i v_m6_w = _mm_unpacklo_epi8(v_m67_b, _mm_setzero_si128());
+    const __m128i v_m7_w = _mm_unpackhi_epi8(v_m67_b, _mm_setzero_si128());
+
+    const __m128i v_p0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
+    const __m128i v_p1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
+    const __m128i v_p2_d = _mm_madd_epi16(v_d2_w, v_m2_w);
+    const __m128i v_p3_d = _mm_madd_epi16(v_d3_w, v_m3_w);
+    const __m128i v_p4_d = _mm_madd_epi16(v_d4_w, v_m4_w);
+    const __m128i v_p5_d = _mm_madd_epi16(v_d5_w, v_m5_w);
+    const __m128i v_p6_d = _mm_madd_epi16(v_d6_w, v_m6_w);
+    const __m128i v_p7_d = _mm_madd_epi16(v_d7_w, v_m7_w);
+
+    const __m128i v_p01_d = _mm_add_epi32(v_p0_d, v_p1_d);
+    const __m128i v_p23_d = _mm_add_epi32(v_p2_d, v_p3_d);
+    const __m128i v_p45_d = _mm_add_epi32(v_p4_d, v_p5_d);
+    const __m128i v_p67_d = _mm_add_epi32(v_p6_d, v_p7_d);
+
+    const __m128i v_p0123_d = _mm_add_epi32(v_p01_d, v_p23_d);
+    const __m128i v_p4567_d = _mm_add_epi32(v_p45_d, v_p67_d);
+
+    v_acc0_d = _mm_add_epi32(v_acc0_d, v_p0123_d);
+    v_acc1_d = _mm_add_epi32(v_acc1_d, v_p4567_d);
+
+    ds += 64;
+    m += 64;
+
+    N -= 64;
+  } while (N);
+
+  v_sign_d = _mm_cmplt_epi32(v_acc0_d, _mm_setzero_si128());
+  v_acc0_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc0_d, v_sign_d),
+                           _mm_unpackhi_epi32(v_acc0_d, v_sign_d));
+
+  v_sign_d = _mm_cmplt_epi32(v_acc1_d, _mm_setzero_si128());
+  v_acc1_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc1_d, v_sign_d),
+                           _mm_unpackhi_epi32(v_acc1_d, v_sign_d));
+
+  v_acc_q = _mm_add_epi64(v_acc0_d, v_acc1_d);
+
+  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));
+
+#if ARCH_X86_64
+  acc = (uint64_t)_mm_cvtsi128_si64(v_acc_q);
+#else
+  xx_storel_64(&acc, v_acc_q);
+#endif
+
+  return acc > limit;
+}
+
+// Negate under mask: (v ^ mask) - mask negates the 16-bit lanes of v_v_w
+// where v_mask_w is all ones, and passes the remaining lanes through.
+static INLINE __m128i negm_epi16(__m128i v_v_w, __m128i v_mask_w) {
+  return _mm_sub_epi16(_mm_xor_si128(v_v_w, v_mask_w), v_mask_w);
+}
+
+/**
+ * See vp10_wedge_compute_delta_squares_c
+ */
+void vp10_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
+                                           const int16_t *b, int N) {
+  const __m128i v_neg_w =
+      _mm_set_epi16(0xffff, 0, 0xffff, 0, 0xffff, 0, 0xffff, 0);
+
+  assert(N % 64 == 0);
+
+  do {
+    const __m128i v_a0_w = xx_load_128(a);
+    const __m128i v_b0_w = xx_load_128(b);
+    const __m128i v_a1_w = xx_load_128(a + 8);
+    const __m128i v_b1_w = xx_load_128(b + 8);
+    const __m128i v_a2_w = xx_load_128(a + 16);
+    const __m128i v_b2_w = xx_load_128(b + 16);
+    const __m128i v_a3_w = xx_load_128(a + 24);
+    const __m128i v_b3_w = xx_load_128(b + 24);
+
+    const __m128i v_ab0l_w = _mm_unpacklo_epi16(v_a0_w, v_b0_w);
+    const __m128i v_ab0h_w = _mm_unpackhi_epi16(v_a0_w, v_b0_w);
+    const __m128i v_ab1l_w = _mm_unpacklo_epi16(v_a1_w, v_b1_w);
+    const __m128i v_ab1h_w = _mm_unpackhi_epi16(v_a1_w, v_b1_w);
+    const __m128i v_ab2l_w = _mm_unpacklo_epi16(v_a2_w, v_b2_w);
+    const __m128i v_ab2h_w = _mm_unpackhi_epi16(v_a2_w, v_b2_w);
+    const __m128i v_ab3l_w = _mm_unpacklo_epi16(v_a3_w, v_b3_w);
+    const __m128i v_ab3h_w = _mm_unpackhi_epi16(v_a3_w, v_b3_w);
+
+    // Negate the second element of each (a, b) pair so the pmaddwd below
+    // computes a*a - b*b
+    const __m128i v_abl0n_w = negm_epi16(v_ab0l_w, v_neg_w);
+    const __m128i v_abh0n_w = negm_epi16(v_ab0h_w, v_neg_w);
+    const __m128i v_abl1n_w = negm_epi16(v_ab1l_w, v_neg_w);
+    const __m128i v_abh1n_w = negm_epi16(v_ab1h_w, v_neg_w);
+    const __m128i v_abl2n_w = negm_epi16(v_ab2l_w, v_neg_w);
+    const __m128i v_abh2n_w = negm_epi16(v_ab2h_w, v_neg_w);
+    const __m128i v_abl3n_w = negm_epi16(v_ab3l_w, v_neg_w);
+    const __m128i v_abh3n_w = negm_epi16(v_ab3h_w, v_neg_w);
+
+    const __m128i v_r0l_w = _mm_madd_epi16(v_ab0l_w, v_abl0n_w);
+    const __m128i v_r0h_w = _mm_madd_epi16(v_ab0h_w, v_abh0n_w);
+    const __m128i v_r1l_w = _mm_madd_epi16(v_ab1l_w, v_abl1n_w);
+    const __m128i v_r1h_w = _mm_madd_epi16(v_ab1h_w, v_abh1n_w);
+    const __m128i v_r2l_w = _mm_madd_epi16(v_ab2l_w, v_abl2n_w);
+    const __m128i v_r2h_w = _mm_madd_epi16(v_ab2h_w, v_abh2n_w);
+    const __m128i v_r3l_w = _mm_madd_epi16(v_ab3l_w, v_abl3n_w);
+    const __m128i v_r3h_w = _mm_madd_epi16(v_ab3h_w, v_abh3n_w);
+
+    const __m128i v_r0_w = _mm_packs_epi32(v_r0l_w, v_r0h_w);
+    const __m128i v_r1_w = _mm_packs_epi32(v_r1l_w, v_r1h_w);
+    const __m128i v_r2_w = _mm_packs_epi32(v_r2l_w, v_r2h_w);
+    const __m128i v_r3_w = _mm_packs_epi32(v_r3l_w, v_r3h_w);
+
+    xx_store_128(d, v_r0_w);
+    xx_store_128(d + 8, v_r1_w);
+    xx_store_128(d + 16, v_r2_w);
+    xx_store_128(d + 24, v_r3_w);
+
+    a += 32;
+    b += 32;
+    d += 32;
+    N -= 32;
+  } while (N);
+}
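Finally, a hedged scalar model of the first routine in this file: per coefficient, the unpack/madd pairs compute m[i] * d[i] + 64 * r1[i], the packs_epi32 step saturates that to int16, and the squares are summed and rounded down by 2 * WEDGE_WEIGHT_BITS. The macro value below is an assumption (WEDGE_WEIGHT_BITS = 6, consistent with m lying in [0, 64]).

#include <stdint.h>

#define WEDGE_WEIGHT_BITS_MODEL 6 /* assumed value of WEDGE_WEIGHT_BITS */

/* Hypothetical scalar model of vp10_wedge_sse_from_residuals_sse2. */
static uint64_t wedge_sse_model(const int16_t *r1, const int16_t *d,
                                const uint8_t *m, int N) {
  uint64_t csse = 0;
  int i;
  for (i = 0; i < N; i++) {
    int32_t t = (1 << WEDGE_WEIGHT_BITS_MODEL) * r1[i] + m[i] * d[i];
    if (t > INT16_MAX) t = INT16_MAX; /* packs_epi32 saturation */
    if (t < INT16_MIN) t = INT16_MIN;
    csse += (uint64_t)((int64_t)t * t);
  }
  /* ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS) */
  return (csse + (1ULL << (2 * WEDGE_WEIGHT_BITS_MODEL - 1))) >>
         (2 * WEDGE_WEIGHT_BITS_MODEL);
}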