Merge "Avoid mixed comparison"
diff --git a/test/test-data.sha1 b/test/test-data.sha1
index 442bfd2..03881c7 100644
--- a/test/test-data.sha1
+++ b/test/test-data.sha1
@@ -569,3 +569,5 @@
 5e524165f0397e6141d914f4f0a66267d7658376  vp90-2-08-tile_1x8.webm.md5
 a34e14923d6d17b1144254d8187d7f85b700a63c  vp90-2-02-size-lf-1920x1080.webm
 e3b28ddcfaeb37fb4d132b93f92642a9ad17c22d  vp90-2-02-size-lf-1920x1080.webm.md5
+d48c5db1b0f8e60521a7c749696b8067886033a3  vp90-2-09-aq2.webm
+84c1599298aac78f2fc05ae2274575d10569dfa0  vp90-2-09-aq2.webm.md5
diff --git a/test/test.mk b/test/test.mk
index 361a34f..2905a1a 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -668,6 +668,8 @@
 LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm.md5
 LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-subpixel-00.ivf
 LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-subpixel-00.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-aq2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-aq2.webm.md5
 LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yv444.webm
 LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yv444.webm.md5
 
diff --git a/test/test_vectors.cc b/test/test_vectors.cc
index 7ffecf0..5b58c4a 100644
--- a/test/test_vectors.cc
+++ b/test/test_vectors.cc
@@ -158,6 +158,7 @@
   "vp90-2-08-tile-4x4.webm", "vp90-2-08-tile-4x1.webm",
   "vp90-2-09-subpixel-00.ivf",
   "vp90-2-02-size-lf-1920x1080.webm",
+  "vp90-2-09-aq2.webm",
 #if CONFIG_NON420
   "vp91-2-04-yv444.webm"
 #endif
diff --git a/test/test_vectors.h b/test/test_vectors.h
index 942175a..491de33 100644
--- a/test/test_vectors.h
+++ b/test/test_vectors.h
@@ -22,9 +22,9 @@
 
 #if CONFIG_VP9_DECODER
 #if CONFIG_NON420
-const int kNumVp9TestVectors = 214;
+const int kNumVp9TestVectors = 215;
 #else
-const int kNumVp9TestVectors = 213;
+const int kNumVp9TestVectors = 214;
 #endif
 
 extern const char *kVP9TestVectors[kNumVp9TestVectors];
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 93f96c8..ead4661 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -238,6 +238,9 @@
   /* pointers to reference frames */
   const YV12_BUFFER_CONFIG *ref_buf[2];
 
+  /* pointer to current frame */
+  const YV12_BUFFER_CONFIG *cur_buf;
+
   int lossless;
   /* Inverse transform function pointers. */
   void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob);
@@ -409,44 +412,6 @@
   *y = (raster_mb >> tx_cols_log2) << tx_size;
 }
 
-static void extend_for_intra(MACROBLOCKD *xd, BLOCK_SIZE plane_bsize,
-                             int plane, int aoff, int loff) {
-  struct macroblockd_plane *const pd = &xd->plane[plane];
-  uint8_t *const buf = pd->dst.buf;
-  const int stride = pd->dst.stride;
-  const int x = aoff * 4 - 1;
-  const int y = loff * 4 - 1;
-  // Copy a pixel into the umv if we are in a situation where the block size
-  // extends into the UMV.
-  // TODO(JBB): Should be able to do the full extend in place so we don't have
-  // to do this multiple times.
-  if (xd->mb_to_right_edge < 0) {
-    const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-    const int umv_border_start = bw + (xd->mb_to_right_edge >>
-                                       (3 + pd->subsampling_x));
-
-    if (x + bw > umv_border_start)
-      vpx_memset(&buf[y * stride + umv_border_start],
-                 buf[y * stride + umv_border_start - 1], bw);
-  }
-
-  if (xd->mb_to_bottom_edge < 0) {
-    if (xd->left_available || x >= 0) {
-      const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
-      const int umv_border_start =
-          bh + (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y));
-
-      if (y + bh > umv_border_start) {
-        const uint8_t c = buf[(umv_border_start - 1) * stride + x];
-        uint8_t *d = &buf[umv_border_start * stride + x];
-        int i;
-        for (i = 0; i < bh; ++i, d += stride)
-          *d = c;
-      }
-    }
-  }
-}
-
 static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                          BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                          int has_eob, int aoff, int loff) {
diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c
index 40d8ffd..72adf92 100644
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -989,15 +989,16 @@
     // Determine the vertical edges that need filtering
     for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
       const MODE_INFO *mi = mi_8x8[c];
+      const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
       const int skip_this = mi[0].mbmi.skip_coeff
                             && is_inter_block(&mi[0].mbmi);
       // left edge of current unit is block/partition edge -> no skip
-      const int block_edge_left = b_width_log2(mi[0].mbmi.sb_type) ?
-          !(c & ((1 << (b_width_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
+      const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
+          !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
       const int skip_this_c = skip_this && !block_edge_left;
       // top edge of current unit is block/partition edge -> no skip
-      const int block_edge_above = b_height_log2(mi[0].mbmi.sb_type) ?
-          !(r & ((1 << (b_height_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
+      const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
+          !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
       const int skip_this_r = skip_this && !block_edge_above;
       const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
                             ? get_uv_tx_size(&mi[0].mbmi)
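
Reviewer's note on the vp9_loopfilter.c hunk above: the rewritten edge tests replace the b_width_log2()/b_height_log2() shift arithmetic with direct lookups of the block width/height in 4x4 and 8x8 units. The standalone sketch below is not part of the patch; it uses local stand-in tables (values assumed to follow the usual 4/8/16/32/64-pixel block widths) to check that the old and new expressions for block_edge_left agree.

#include <assert.h>
#include <stdio.h>

int main(void) {
  /* Stand-in tables: block width in 4x4 units, in 8x8 units, and
   * log2(width / 4), for widths of 4, 8, 16, 32 and 64 pixels. */
  const int num_4x4_wide[5] = {1, 2, 4, 8, 16};
  const int num_8x8_wide[5] = {1, 1, 2, 4, 8};
  const int b_width_log2_tab[5] = {0, 1, 2, 3, 4};
  int i, c;
  for (i = 0; i < 5; ++i) {
    for (c = 0; c < 16; ++c) {
      const int old_edge = b_width_log2_tab[i] ?
          !(c & ((1 << (b_width_log2_tab[i] - 1)) - 1)) : 1;
      const int new_edge = (num_4x4_wide[i] > 1) ?
          !(c & (num_8x8_wide[i] - 1)) : 1;
      assert(old_edge == new_edge);
    }
  }
  printf("old and new block_edge_left tests agree\n");
  return 0;
}
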
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 8df8aec..df4961c 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -13,6 +13,11 @@
 
 #define MVREF_NEIGHBOURS 8
 
+typedef struct position {
+  int row;
+  int col;
+} POSITION;
+
 typedef enum {
   BOTH_ZERO = 0,
   ZERO_PLUS_PREDICTED = 1,
@@ -71,7 +76,7 @@
   BOTH_INTRA  // 18
 };
 
-static const MV mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = {
+static const POSITION mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = {
   // 4X4
   {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}},
   // 4X8
@@ -172,11 +177,11 @@
 // are inside the borders of the tile.
 static INLINE int is_inside(const TileInfo *const tile,
                             int mi_col, int mi_row, int mi_rows,
-                            const MV *mv) {
-  return !(mi_row + mv->row < 0 ||
-           mi_col + mv->col < tile->mi_col_start ||
-           mi_row + mv->row >= mi_rows ||
-           mi_col + mv->col >= tile->mi_col_end);
+                            const POSITION *mi_pos) {
+  return !(mi_row + mi_pos->row < 0 ||
+           mi_col + mi_pos->col < tile->mi_col_start ||
+           mi_row + mi_pos->row >= mi_rows ||
+           mi_col + mi_pos->col >= tile->mi_col_end);
 }
 
 // This function searches the neighbourhood of a given MB/SB
@@ -190,7 +195,7 @@
                           int mi_row, int mi_col) {
   const int *ref_sign_bias = cm->ref_frame_sign_bias;
   int i, refmv_count = 0;
-  const MV *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
+  const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
   const MB_MODE_INFO *const prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL;
   int different_ref_found = 0;
   int context_counter = 0;
@@ -202,7 +207,7 @@
   // if the size < 8x8 we get the mv from the bmi substructure,
   // and we also need to keep a mode count.
   for (i = 0; i < 2; ++i) {
-    const MV *const mv_ref = &mv_ref_search[i];
+    const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
       const MODE_INFO *const candidate_mi = xd->mi_8x8[mv_ref->col + mv_ref->row
                                                    * xd->mode_info_stride];
@@ -229,7 +234,7 @@
   // as before except we don't need to keep track of sub blocks or
   // mode counts.
   for (; i < MVREF_NEIGHBOURS; ++i) {
-    const MV *const mv_ref = &mv_ref_search[i];
+    const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
       const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
                                             mv_ref->row
@@ -259,7 +264,7 @@
   // different reference frames.
   if (different_ref_found) {
     for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
-      const MV *mv_ref = &mv_ref_search[i];
+      const POSITION *mv_ref = &mv_ref_search[i];
       if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
         const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
                                                           mv_ref->row
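
Reviewer's note on the vp9_mvref_common.c hunk above: mv_ref_blocks holds neighbour offsets in mode-info units, not motion vectors, so the patch gives it its own POSITION type with plain int fields instead of reusing MV. With int offsets, the bounds arithmetic in is_inside() stays in a single signed integer type. A minimal sketch of the idea, using hypothetical stand-ins rather than the real headers (MV is assumed to keep int16_t fields as in vp9/common/vp9_mv.h):

#include <stdint.h>
#include <stdio.h>

typedef struct mv { int16_t row, col; } MV;          /* motion vector, 1/8-pel units */
typedef struct position { int row, col; } POSITION;  /* neighbour offset, mi units */

/* Same shape as is_inside() after the patch: every operand is an int. */
static int inside(int mi_row, int mi_col, int mi_rows, int mi_cols,
                  const POSITION *pos) {
  return !(mi_row + pos->row < 0 ||
           mi_col + pos->col < 0 ||
           mi_row + pos->row >= mi_rows ||
           mi_col + pos->col >= mi_cols);
}

int main(void) {
  const POSITION left_neighbour = {0, -1};
  /* A block in mi column 0 has no left neighbour inside the frame. */
  printf("%d\n", inside(3, 0, 30, 40, &left_neighbour));  /* prints 0 */
  return 0;
}
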
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index 40cfc81..449b945 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -16,12 +16,8 @@
 #include "vp9/common/vp9_seg_common.h"
 #include "vp9/common/vp9_treecoder.h"
 
-static INLINE const MB_MODE_INFO *get_above_mbmi(const MODE_INFO *const above) {
-  return (above != NULL) ? &above->mbmi : NULL;
-}
-
-static INLINE const MB_MODE_INFO *get_left_mbmi(const MODE_INFO *const left) {
-  return (left != NULL) ? &left->mbmi : NULL;
+static INLINE const MB_MODE_INFO *get_mbmi(const MODE_INFO *const mi) {
+  return (mi != NULL) ? &mi->mbmi : NULL;
 }
 
 // Returns a context number for the given MB prediction signal
@@ -30,15 +26,13 @@
   // The mode info data structure has a one element border above and to the
   // left of the entries corresponding to real macroblocks.
   // The prediction flags in these dummy entries are initialised to 0.
-  const MODE_INFO *const left_mi = get_left_mi(xd);
-  const int has_left = left_mi != NULL ? is_inter_block(&left_mi->mbmi) : 0;
-  const int left_type = has_left ? left_mi->mbmi.interp_filter
-                                 : SWITCHABLE_FILTERS;
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
+  const int left_type = left_mbmi != NULL && is_inter_block(left_mbmi) ?
+                           left_mbmi->interp_filter : SWITCHABLE_FILTERS;
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const int above_type = above_mbmi != NULL && is_inter_block(above_mbmi) ?
+                             above_mbmi->interp_filter : SWITCHABLE_FILTERS;
 
-  const MODE_INFO *const above_mi = get_above_mi(xd);
-  const int has_above = above_mi != NULL ? is_inter_block(&above_mi->mbmi) : 0;
-  const int above_type = has_above ? above_mi->mbmi.interp_filter
-                                   : SWITCHABLE_FILTERS;
   if (left_type == above_type)
     return left_type;
   else if (left_type == SWITCHABLE_FILTERS && above_type != SWITCHABLE_FILTERS)
@@ -50,8 +44,8 @@
 }
 // Returns a context number for the given MB prediction signal
 int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(get_above_mi(xd));
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(get_left_mi(xd));
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
   const int has_above = above_mbmi != NULL;
   const int has_left = left_mbmi != NULL;
   const int above_intra = has_above ? !is_inter_block(above_mbmi) : 1;
@@ -76,8 +70,8 @@
 int vp9_get_reference_mode_context(const VP9_COMMON *cm,
                                    const MACROBLOCKD *xd) {
   int ctx;
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(get_above_mi(xd));
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(get_left_mi(xd));
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
   const int has_above = above_mbmi != NULL;
   const int has_left = left_mbmi != NULL;
   // Note:
@@ -119,12 +113,10 @@
 int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
                                     const MACROBLOCKD *xd) {
   int pred_context;
-  const MODE_INFO *const above_mi = get_above_mi(xd);
-  const MODE_INFO *const left_mi = get_left_mi(xd);
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(above_mi);
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(left_mi);
-  const int above_in_image = above_mi != NULL;
-  const int left_in_image = left_mi != NULL;
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
+  const int above_in_image = above_mbmi != NULL;
+  const int left_in_image = left_mbmi != NULL;
   const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
   const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
   // Note:
@@ -148,10 +140,10 @@
     } else {  // inter/inter
       const int l_sg = !has_second_ref(left_mbmi);
       const int a_sg = !has_second_ref(above_mbmi);
-      MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
-                                     : above_mbmi->ref_frame[var_ref_idx];
-      MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
-                                     : left_mbmi->ref_frame[var_ref_idx];
+      const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
+                                           : above_mbmi->ref_frame[var_ref_idx];
+      const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
+                                           : left_mbmi->ref_frame[var_ref_idx];
 
       if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
         pred_context = 0;
@@ -164,8 +156,8 @@
         else
           pred_context = 1;
       } else if (l_sg || a_sg) {  // single/comp
-        MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl;
-        MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl;
+        const MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl;
+        const MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl;
         if (vrfc == cm->comp_var_ref[1] && rfs != cm->comp_var_ref[1])
           pred_context = 1;
         else if (rfs == cm->comp_var_ref[1] && vrfc != cm->comp_var_ref[1])
@@ -200,8 +192,8 @@
 
 int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(get_above_mi(xd));
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(get_left_mi(xd));
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
   const int has_above = above_mbmi != NULL;
   const int has_left = left_mbmi != NULL;
   const int above_intra = has_above ? !is_inter_block(above_mbmi) : 1;
@@ -264,8 +256,8 @@
 
 int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(get_above_mi(xd));
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(get_left_mi(xd));
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
   const int has_above = above_mbmi != NULL;
   const int has_left = left_mbmi != NULL;
   const int above_intra = has_above ? !is_inter_block(above_mbmi) : 1;
@@ -352,8 +344,8 @@
 // The prediction flags in these dummy entries are initialized to 0.
 int vp9_get_tx_size_context(const MACROBLOCKD *xd) {
   const int max_tx_size = max_txsize_lookup[xd->mi_8x8[0]->mbmi.sb_type];
-  const MB_MODE_INFO *const above_mbmi = get_above_mbmi(get_above_mi(xd));
-  const MB_MODE_INFO *const left_mbmi = get_left_mbmi(get_left_mi(xd));
+  const MB_MODE_INFO *const above_mbmi = get_mbmi(get_above_mi(xd));
+  const MB_MODE_INFO *const left_mbmi = get_mbmi(get_left_mi(xd));
   const int has_above = above_mbmi != NULL;
   const int has_left = left_mbmi != NULL;
   int above_ctx = (has_above && !above_mbmi->skip_coeff) ? above_mbmi->tx_size
diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c
index eb643b0..96ba3e4 100644
--- a/vp9/common/vp9_reconintra.c
+++ b/vp9/common/vp9_reconintra.c
@@ -313,17 +313,21 @@
 #undef intra_pred_allsizes
 }
 
-static void build_intra_predictors(const uint8_t *ref, int ref_stride,
-                                   uint8_t *dst, int dst_stride,
+static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
+                                   int ref_stride, uint8_t *dst, int dst_stride,
                                    MB_PREDICTION_MODE mode, TX_SIZE tx_size,
                                    int up_available, int left_available,
-                                   int right_available) {
+                                   int right_available, int x, int y,
+                                   int plane) {
   int i;
   DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64);
   DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16);
   uint8_t *above_row = above_data + 16;
   const uint8_t *const_above_row = above_row;
   const int bs = 4 << tx_size;
+  int frame_width, frame_height;
+  int x0, y0;
+  const struct macroblockd_plane *const pd = &xd->plane[plane];
 
   // 127 127 127 .. 127 127 127 127 127 127
   // 129  A   B  ..  Y   Z
@@ -334,26 +338,90 @@
 
   once(init_intra_pred_fn_ptrs);
 
+  // Get current frame width and height from the frame buffer.
+  if (plane == 0) {
+    frame_width = xd->cur_buf->y_width;
+    frame_height = xd->cur_buf->y_height;
+  } else {
+    frame_width = xd->cur_buf->uv_width;
+    frame_height = xd->cur_buf->uv_height;
+  }
+
+  // Get block position in current frame.
+  x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
+  y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
+
   // left
   if (left_available) {
-    for (i = 0; i < bs; i++)
-      left_col[i] = ref[i * ref_stride - 1];
+    if (xd->mb_to_bottom_edge < 0) {
+      /* slower path if the block needs border extension */
+      if (y0 + bs <= frame_height) {
+        for (i = 0; i < bs; ++i)
+          left_col[i] = ref[i * ref_stride - 1];
+      } else {
+        const int extend_bottom = frame_height - y0;
+        for (i = 0; i < extend_bottom; ++i)
+          left_col[i] = ref[i * ref_stride - 1];
+        for (; i < bs; ++i)
+          left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1];
+      }
+    } else {
+      /* faster path if the block does not need extension */
+      for (i = 0; i < bs; ++i)
+        left_col[i] = ref[i * ref_stride - 1];
+    }
   } else {
     vpx_memset(left_col, 129, bs);
   }
 
+  // TODO(hkuang) do not extend 2*bs pixels for all modes.
   // above
   if (up_available) {
     const uint8_t *above_ref = ref - ref_stride;
-    if (bs == 4 && right_available && left_available) {
-      const_above_row = above_ref;
+    if (xd->mb_to_right_edge < 0) {
+      /* slower path if the block needs border extension */
+      if (x0 + 2 * bs <= frame_width) {
+        if (right_available && bs == 4) {
+          vpx_memcpy(above_row - 1, above_ref - 1, 2 * bs + 1);
+        } else {
+          vpx_memcpy(above_row - 1, above_ref - 1, bs + 1);
+          vpx_memset(above_row + bs, above_row[bs - 1], bs);
+        }
+      } else if (x0 + bs <= frame_width) {
+        const int r = frame_width - x0;
+        if (right_available && bs == 4) {
+          vpx_memcpy(above_row - 1, above_ref - 1, r + 1);
+          vpx_memset(above_row + r, above_row[r - 1],
+                     x0 + 2 * bs - frame_width);
+        } else {
+          vpx_memcpy(above_row - 1, above_ref - 1, bs + 1);
+          vpx_memset(above_row + bs, above_row[bs - 1], bs);
+        }
+      } else if (x0 <= frame_width) {
+        const int r = frame_width - x0;
+        if (right_available && bs == 4) {
+          vpx_memcpy(above_row - 1, above_ref - 1, r + 1);
+          vpx_memset(above_row + r, above_row[r - 1],
+                     x0 + 2 * bs - frame_width);
+        } else {
+          vpx_memcpy(above_row - 1, above_ref - 1, r + 1);
+          vpx_memset(above_row + r, above_row[r - 1],
+                     x0 + 2 * bs - frame_width);
+        }
+        above_row[-1] = left_available ? above_ref[-1] : 129;
+      }
     } else {
-      vpx_memcpy(above_row, above_ref, bs);
-      if (bs == 4 && right_available)
-        vpx_memcpy(above_row + bs, above_ref + bs, bs);
-      else
-        vpx_memset(above_row + bs, above_row[bs - 1], bs);
-      above_row[-1] = left_available ? above_ref[-1] : 129;
+      /* faster path if the block does not need extension */
+      if (bs == 4 && right_available && left_available) {
+        const_above_row = above_ref;
+      } else {
+        vpx_memcpy(above_row, above_ref, bs);
+        if (bs == 4 && right_available)
+          vpx_memcpy(above_row + bs, above_ref + bs, bs);
+        else
+          vpx_memset(above_row + bs, above_row[bs - 1], bs);
+        above_row[-1] = left_available ? above_ref[-1] : 129;
+      }
     }
   } else {
     vpx_memset(above_row, 127, bs * 2);
@@ -370,16 +438,19 @@
 }
 
 void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
-                            TX_SIZE tx_size, int mode,
-                            const uint8_t *ref, int ref_stride,
-                            uint8_t *dst, int dst_stride) {
+                             TX_SIZE tx_size, int mode,
+                             const uint8_t *ref, int ref_stride,
+                             uint8_t *dst, int dst_stride,
+                             int aoff, int loff, int plane) {
   const int bwl = bwl_in - tx_size;
   const int wmask = (1 << bwl) - 1;
   const int have_top = (block_idx >> bwl) || xd->up_available;
   const int have_left = (block_idx & wmask) || xd->left_available;
   const int have_right = ((block_idx & wmask) != wmask);
+  const int x = aoff * 4;
+  const int y = loff * 4;
 
   assert(bwl >= 0);
-  build_intra_predictors(ref, ref_stride, dst, dst_stride, mode, tx_size,
-                         have_top, have_left, have_right);
+  build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size,
+                         have_top, have_left, have_right, x, y, plane);
 }
diff --git a/vp9/common/vp9_reconintra.h b/vp9/common/vp9_reconintra.h
index 6e3f55c..fc916fc 100644
--- a/vp9/common/vp9_reconintra.h
+++ b/vp9/common/vp9_reconintra.h
@@ -17,5 +17,6 @@
 void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in,
                              TX_SIZE tx_size, int mode,
                              const uint8_t *ref, int ref_stride,
-                             uint8_t *dst, int dst_stride);
+                             uint8_t *dst, int dst_stride,
+                             int aoff, int loff, int plane);
 #endif  // VP9_COMMON_VP9_RECONINTRA_H_
diff --git a/vp9/common/vp9_seg_common.c b/vp9/common/vp9_seg_common.c
index ef30404..910200e 100644
--- a/vp9/common/vp9_seg_common.c
+++ b/vp9/common/vp9_seg_common.c
@@ -41,11 +41,6 @@
   seg->feature_mask[segment_id] |= 1 << feature_id;
 }
 
-void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
-                            SEG_LVL_FEATURES feature_id) {
-  seg->feature_mask[segment_id] &= ~(1 << feature_id);
-}
-
 int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_max[feature_id];
 }
@@ -54,11 +49,6 @@
   return seg_feature_data_signed[feature_id];
 }
 
-void vp9_clear_segdata(struct segmentation *seg, int segment_id,
-                       SEG_LVL_FEATURES feature_id) {
-  seg->feature_data[segment_id][feature_id] = 0;
-}
-
 void vp9_set_segdata(struct segmentation *seg, int segment_id,
                      SEG_LVL_FEATURES feature_id, int seg_data) {
   assert(seg_data <= seg_feature_data_max[feature_id]);
diff --git a/vp9/common/vp9_seg_common.h b/vp9/common/vp9_seg_common.h
index eb38c06..0b0879e 100644
--- a/vp9/common/vp9_seg_common.h
+++ b/vp9/common/vp9_seg_common.h
@@ -55,18 +55,10 @@
                            int segment_id,
                            SEG_LVL_FEATURES feature_id);
 
-void vp9_disable_segfeature(struct segmentation *seg,
-                            int segment_id,
-                            SEG_LVL_FEATURES feature_id);
-
 int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
 
 int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
 
-void vp9_clear_segdata(struct segmentation *seg,
-                       int segment_id,
-                       SEG_LVL_FEATURES feature_id);
-
 void vp9_set_segdata(struct segmentation *seg,
                      int segment_id,
                      SEG_LVL_FEATURES feature_id,
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 79f0835..c167004 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -305,12 +305,10 @@
   txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
   dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];
 
-  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
-    extend_for_intra(xd, plane_bsize, plane, x, y);
-
   vp9_predict_intra_block(xd, block >> (tx_size << 1),
                           b_width_log2(plane_bsize), tx_size, mode,
-                          dst, pd->dst.stride, dst, pd->dst.stride);
+                          dst, pd->dst.stride, dst, pd->dst.stride,
+                          x, y, plane);
 
   if (!mi->mbmi.skip_coeff) {
     const int eob = vp9_decode_block_tokens(cm, xd, plane, block,
@@ -1349,6 +1347,7 @@
   const int tile_rows = 1 << cm->log2_tile_rows;
   const int tile_cols = 1 << cm->log2_tile_cols;
   YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
+  xd->cur_buf = new_fb;
 
   if (!first_partition_size) {
       // showing a frame directly
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index e9d35c1..7af9a1f 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -548,6 +548,9 @@
                           src->alpha_stride};
   int i;
 
+  // Set current frame pointer.
+  x->e_mbd.cur_buf = src;
+
   for (i = 0; i < MAX_MB_PLANE; i++)
     setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                      NULL, x->e_mbd.plane[i].subsampling_x,
@@ -1457,7 +1460,7 @@
   // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
   int use_8x8 = 1;
 
-  if (cm->frame_type && !cpi->is_src_frame_alt_ref &&
+  if (cm->frame_type && !cpi->rc.is_src_frame_alt_ref &&
       ((use_8x8 && bsize == BLOCK_16X16) ||
       bsize == BLOCK_32X32 || bsize == BLOCK_64X64)) {
     int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;
@@ -1946,7 +1949,7 @@
             || cm->prev_mi == 0
             || cpi->common.show_frame == 0
             || cpi->common.frame_type == KEY_FRAME
-            || cpi->is_src_frame_alt_ref
+            || cpi->rc.is_src_frame_alt_ref
             || ((cpi->sf.use_lastframe_partitioning ==
                  LAST_FRAME_PARTITION_LOW_MOTION) &&
                  sb_has_motion(cpi, prev_mi_8x8))) {
@@ -2074,7 +2077,7 @@
 
   xd->last_mi = cm->prev_mi;
 
-  vp9_zero(cpi->NMVcount);
+  vp9_zero(cpi->common.counts.mv);
   vp9_zero(cpi->coef_counts);
   vp9_zero(cm->counts.eob_branch);
 
@@ -2276,7 +2279,7 @@
   int frame_type;
   if (frame_is_intra_only(&cpi->common))
     frame_type = 0;
-  else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
+  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
     frame_type = 3;
   else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
     frame_type = 1;
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index e05ba1b..6bc1a4b 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -550,9 +550,6 @@
   src = &p->src.buf[4 * (j * p->src.stride + i)];
   src_diff = &p->src_diff[4 * (j * diff_stride + i)];
 
-  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
-    extend_for_intra(xd, plane_bsize, plane, i, j);
-
   // if (x->optimize)
   // vp9_optimize_b(plane, block, plane_bsize, tx_size, x, args->ctx);
 
@@ -563,7 +560,7 @@
       vp9_predict_intra_block(xd, block >> 6, bwl, TX_32X32, mode,
                               x->skip_encode ? src : dst,
                               x->skip_encode ? p->src.stride : pd->dst.stride,
-                              dst, pd->dst.stride);
+                              dst, pd->dst.stride, i, j, plane);
       if (!x->skip_recode) {
         vp9_subtract_block(32, 32, src_diff, diff_stride,
                            src, p->src.stride, dst, pd->dst.stride);
@@ -586,7 +583,7 @@
       vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode,
                               x->skip_encode ? src : dst,
                               x->skip_encode ? p->src.stride : pd->dst.stride,
-                              dst, pd->dst.stride);
+                              dst, pd->dst.stride, i, j, plane);
       if (!x->skip_recode) {
         vp9_subtract_block(16, 16, src_diff, diff_stride,
                            src, p->src.stride, dst, pd->dst.stride);
@@ -606,7 +603,7 @@
       vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode,
                               x->skip_encode ? src : dst,
                               x->skip_encode ? p->src.stride : pd->dst.stride,
-                              dst, pd->dst.stride);
+                              dst, pd->dst.stride, i, j, plane);
       if (!x->skip_recode) {
         vp9_subtract_block(8, 8, src_diff, diff_stride,
                            src, p->src.stride, dst, pd->dst.stride);
@@ -630,7 +627,7 @@
       vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode,
                               x->skip_encode ? src : dst,
                               x->skip_encode ? p->src.stride : pd->dst.stride,
-                              dst, pd->dst.stride);
+                              dst, pd->dst.stride, i, j, plane);
 
       if (!x->skip_recode) {
         vp9_subtract_block(4, 4, src_diff, diff_stride,
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 9af28f9..dae89ad 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -166,7 +166,7 @@
 void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer *w) {
   int i, j;
   nmv_context *mvc = &cpi->common.fc.nmvc;
-  nmv_context_counts *counts = &cpi->NMVcount;
+  nmv_context_counts *counts = &cpi->common.counts.mv;
 
   write_mv_update(vp9_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
 
@@ -252,6 +252,7 @@
   MODE_INFO *mi = x->e_mbd.mi_8x8[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const int is_compound = has_second_ref(mbmi);
+  nmv_context_counts *counts = &cpi->common.counts.mv;
 
   if (mbmi->sb_type < BLOCK_8X8) {
     const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type];
@@ -262,11 +263,11 @@
       for (idx = 0; idx < 2; idx += num_4x4_w) {
         const int i = idy * 2 + idx;
         if (mi->bmi[i].as_mode == NEWMV)
-          inc_mvs(mi->bmi[i].as_mv, best_ref_mv, is_compound, &cpi->NMVcount);
+          inc_mvs(mi->bmi[i].as_mv, best_ref_mv, is_compound, counts);
       }
     }
   } else if (mbmi->mode == NEWMV) {
-    inc_mvs(mbmi->mv, best_ref_mv, is_compound, &cpi->NMVcount);
+    inc_mvs(mbmi->mv, best_ref_mv, is_compound, counts);
   }
 }
 
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index cd6831a..b54f78a 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -393,14 +393,14 @@
 }
 
 static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
-                                     int_mv *ref_mv, MV *best_mv,
+                                     MV *ref_mv, MV *best_mv,
                                      YV12_BUFFER_CONFIG *recon_buffer,
                                      int *best_motion_err, int recon_yoffset) {
   MACROBLOCKD *const xd = &x->e_mbd;
   int num00;
 
-  int_mv tmp_mv;
-  int_mv ref_mv_full;
+  MV tmp_mv = {0, 0};
+  MV ref_mv_full;
 
   int tmp_err;
   int step_param = 3;
@@ -440,21 +440,20 @@
   xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
 
   // Initial step/diamond search centred on best mv
-  tmp_mv.as_int = 0;
-  ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3;
-  ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3;
-  tmp_err = cpi->diamond_search_sad(x, &ref_mv_full.as_mv, &tmp_mv.as_mv,
+  ref_mv_full.col = ref_mv->col >> 3;
+  ref_mv_full.row = ref_mv->row >> 3;
+  tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv,
                                     step_param,
                                     x->sadperbit16, &num00, &v_fn_ptr,
                                     x->nmvjointcost,
-                                    x->mvcost, &ref_mv->as_mv);
+                                    x->mvcost, ref_mv);
   if (tmp_err < INT_MAX - new_mv_mode_penalty)
     tmp_err += new_mv_mode_penalty;
 
   if (tmp_err < *best_motion_err) {
     *best_motion_err = tmp_err;
-    best_mv->row = tmp_mv.as_mv.row;
-    best_mv->col = tmp_mv.as_mv.col;
+    best_mv->row = tmp_mv.row;
+    best_mv->col = tmp_mv.col;
   }
 
   // Further step/diamond searches as necessary
@@ -467,18 +466,18 @@
     if (num00) {
       num00--;
     } else {
-      tmp_err = cpi->diamond_search_sad(x, &ref_mv_full.as_mv, &tmp_mv.as_mv,
+      tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv,
                                         step_param + n, x->sadperbit16,
                                         &num00, &v_fn_ptr,
                                         x->nmvjointcost,
-                                        x->mvcost, &ref_mv->as_mv);
+                                        x->mvcost, ref_mv);
       if (tmp_err < INT_MAX - new_mv_mode_penalty)
         tmp_err += new_mv_mode_penalty;
 
       if (tmp_err < *best_motion_err) {
         *best_motion_err = tmp_err;
-        best_mv->row = tmp_mv.as_mv.row;
-        best_mv->col = tmp_mv.as_mv.col;
+        best_mv->row = tmp_mv.row;
+        best_mv->col = tmp_mv.col;
       }
     }
   }
@@ -649,9 +648,8 @@
 
         // Test last reference frame using the previous best mv as the
         // starting point (best reference) for the search
-        first_pass_motion_search(cpi, x, &best_ref_mv,
-                                 &mv.as_mv, lst_yv12,
-                                 &motion_error, recon_yoffset);
+        first_pass_motion_search(cpi, x, &best_ref_mv.as_mv, &mv.as_mv,
+                                 lst_yv12, &motion_error, recon_yoffset);
         if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
           vp9_clear_system_state();  // __asm emms;
           motion_error *= error_weight;
@@ -661,7 +659,7 @@
         // based search as well.
         if (best_ref_mv.as_int) {
           tmp_err = INT_MAX;
-          first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
+          first_pass_motion_search(cpi, x, &zero_ref_mv.as_mv, &tmp_mv.as_mv,
                                    lst_yv12, &tmp_err, recon_yoffset);
           if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
             vp9_clear_system_state();  // __asm emms;
@@ -679,9 +677,8 @@
           // Simple 0,0 motion with no mv overhead
           gf_motion_error = zz_motion_search(cpi, x, gld_yv12, recon_yoffset);
 
-          first_pass_motion_search(cpi, x, &zero_ref_mv,
-                                   &tmp_mv.as_mv, gld_yv12,
-                                   &gf_motion_error, recon_yoffset);
+          first_pass_motion_search(cpi, x, &zero_ref_mv.as_mv, &tmp_mv.as_mv,
+                                   gld_yv12, &gf_motion_error, recon_yoffset);
           if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
             vp9_clear_system_state();  // __asm emms;
             gf_motion_error *= error_weight;
@@ -1517,6 +1514,8 @@
   cpi->this_frame_weight = cpi->arf_weight[cpi->sequence_number];
   assert(cpi->this_frame_weight >= 0);
 
+  cpi->twopass.gf_zeromotion_pct = 0;
+
   // Initialize frame coding order variables.
   cpi->new_frame_coding_order_period = 0;
   cpi->next_frame_in_order = 0;
@@ -1525,14 +1524,14 @@
   vp9_zero(cpi->arf_buffer_idx);
   vpx_memset(cpi->arf_weight, -1, sizeof(cpi->arf_weight));
 
-  if (cpi->twopass.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) {
+  if (cpi->rc.frames_to_key <= (FIXED_ARF_GROUP_SIZE + 8)) {
     // Setup a GF group close to the keyframe.
-    cpi->source_alt_ref_pending = 0;
-    cpi->rc.baseline_gf_interval = cpi->twopass.frames_to_key;
+    cpi->rc.source_alt_ref_pending = 0;
+    cpi->rc.baseline_gf_interval = cpi->rc.frames_to_key;
     schedule_frames(cpi, 0, (cpi->rc.baseline_gf_interval - 1), 2, 0, 0);
   } else {
     // Setup a fixed period ARF group.
-    cpi->source_alt_ref_pending = 1;
+    cpi->rc.source_alt_ref_pending = 1;
     cpi->rc.baseline_gf_interval = FIXED_ARF_GROUP_SIZE;
     schedule_frames(cpi, 0, -(cpi->rc.baseline_gf_interval - 1), 2, 1, 0);
   }
@@ -1630,16 +1629,17 @@
   // bits to spare and are better with a smaller interval and smaller boost.
   // At high Q when there are few bits to spare we are better with a longer
   // interval to spread the cost of the GF.
+  //
   active_max_gf_interval =
-    12 + ((int)vp9_convert_qindex_to_q(cpi->rc.active_worst_quality) >> 5);
+    11 + ((int)vp9_convert_qindex_to_q(cpi->rc.last_q[INTER_FRAME]) >> 5);
 
   if (active_max_gf_interval > cpi->rc.max_gf_interval)
     active_max_gf_interval = cpi->rc.max_gf_interval;
 
   i = 0;
   while (((i < cpi->twopass.static_scene_max_gf_interval) ||
-          ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) &&
-         (i < cpi->twopass.frames_to_key)) {
+          ((cpi->rc.frames_to_key - i) < MIN_GF_INTERVAL)) &&
+         (i < cpi->rc.frames_to_key)) {
     i++;    // Increment the loop counter
 
     // Accumulate error score of frames in this gf group
@@ -1694,7 +1694,7 @@
         // Don't break out with a very short interval
         (i > MIN_GF_INTERVAL) &&
         // Don't break out very close to a key frame
-        ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) &&
+        ((cpi->rc.frames_to_key - i) >= MIN_GF_INTERVAL) &&
         ((boost_score > 125.0) || (next_frame.pcnt_inter < 0.75)) &&
         (!flash_detected) &&
         ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
@@ -1710,17 +1710,17 @@
     old_boost_score = boost_score;
   }
 
-  cpi->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0);
+  cpi->twopass.gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0);
 
   // Don't allow a gf too near the next kf
-  if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) {
-    while (i < cpi->twopass.frames_to_key) {
+  if ((cpi->rc.frames_to_key - i) < MIN_GF_INTERVAL) {
+    while (i < cpi->rc.frames_to_key) {
       i++;
 
       if (EOF == input_stats(cpi, this_frame))
         break;
 
-      if (i < cpi->twopass.frames_to_key) {
+      if (i < cpi->rc.frames_to_key) {
         mod_frame_err = calculate_modified_err(cpi, this_frame);
         gf_group_err += mod_frame_err;
       }
@@ -1747,7 +1747,7 @@
       (i < cpi->oxcf.lag_in_frames) &&
       (i >= MIN_GF_INTERVAL) &&
       // don't use ARF very near next kf
-      (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) &&
+      (i <= (cpi->rc.frames_to_key - MIN_GF_INTERVAL)) &&
       ((next_frame.pcnt_inter > 0.75) ||
        (next_frame.pcnt_second_ref > 0.5)) &&
       ((mv_in_out_accumulator / (double)i > -0.2) ||
@@ -1756,7 +1756,7 @@
     // Alternative boost calculation for alt ref
     cpi->rc.gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost,
                                     &b_boost);
-    cpi->source_alt_ref_pending = 1;
+    cpi->rc.source_alt_ref_pending = 1;
 
 #if CONFIG_MULTIPLE_ARF
     // Set the ARF schedule.
@@ -1766,7 +1766,7 @@
 #endif
   } else {
     cpi->rc.gfu_boost = (int)boost_score;
-    cpi->source_alt_ref_pending = 0;
+    cpi->rc.source_alt_ref_pending = 0;
 #if CONFIG_MULTIPLE_ARF
     // Set the GF schedule.
     if (cpi->multi_arf_enabled) {
@@ -1821,7 +1821,7 @@
   // where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left.
   // This is also important for short clips where there may only be one
   // key frame.
-  if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count -
+  if (cpi->rc.frames_to_key >= (int)(cpi->twopass.total_stats.count -
                                           cpi->common.current_video_frame)) {
     cpi->twopass.kf_group_bits =
       (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0;
@@ -1857,7 +1857,8 @@
 
   // Assign bits to the arf or gf.
   for (i = 0;
-      i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME);
+      i <= (cpi->rc.source_alt_ref_pending &&
+            cpi->common.frame_type != KEY_FRAME);
       ++i) {
     int allocation_chunks;
     int q = cpi->rc.last_q[INTER_FRAME];
@@ -1868,7 +1869,7 @@
     // Set max and minimum boost and hence minimum allocation
     boost = clamp(boost, 125, (cpi->rc.baseline_gf_interval + 1) * 200);
 
-    if (cpi->source_alt_ref_pending && i == 0)
+    if (cpi->rc.source_alt_ref_pending && i == 0)
       allocation_chunks = ((cpi->rc.baseline_gf_interval + 1) * 100) + boost;
     else
       allocation_chunks = (cpi->rc.baseline_gf_interval * 100) + (boost - 100);
@@ -1921,7 +1922,7 @@
     if (i == 0) {
       cpi->twopass.gf_bits = gf_bits;
     }
-    if (i == 1 || (!cpi->source_alt_ref_pending
+    if (i == 1 || (!cpi->rc.source_alt_ref_pending
         && (cpi->common.frame_type != KEY_FRAME))) {
       // Per frame bit target for this frame
       cpi->rc.per_frame_bandwidth = gf_bits;
@@ -1940,7 +1941,7 @@
     // For normal GFs we want to remove the error score for the first frame
     // of the group (except in Key frame case where this has already
     // happened)
-    if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
+    if (!cpi->rc.source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
       cpi->twopass.gf_group_error_left = (int64_t)(gf_group_err
                                                    - gf_first_frame_err);
     else
@@ -1956,7 +1957,7 @@
     // despite (MIN_GF_INTERVAL) and would cause a divide by 0 in the
     // calculation of alt_extra_bits.
     if (cpi->rc.baseline_gf_interval >= 3) {
-      const int boost = cpi->source_alt_ref_pending ?
+      const int boost = cpi->rc.source_alt_ref_pending ?
           b_boost : cpi->rc.gfu_boost;
 
       if (boost >= 150) {
@@ -2098,7 +2099,7 @@
     // estimate for the clip is bad, but helps prevent excessive
     // variation in Q, especially near the end of a clip
     // where for example a small overspend may cause Q to crash
-    adjust_maxq_qrange(cpi);
+    // adjust_maxq_qrange(cpi);
   }
   vp9_zero(this_frame);
   if (EOF == input_stats(cpi, &this_frame))
@@ -2108,7 +2109,7 @@
   this_frame_coded_error = this_frame.coded_error;
 
   // keyframe and section processing !
-  if (cpi->twopass.frames_to_key == 0) {
+  if (cpi->rc.frames_to_key == 0) {
     // Define next KF group and assign bits to it
     this_frame_copy = this_frame;
     find_next_key_frame(cpi, &this_frame_copy);
@@ -2119,8 +2120,6 @@
     // Define next gf group and assign bits to it
     this_frame_copy = this_frame;
 
-    cpi->gf_zeromotion_pct = 0;
-
 #if CONFIG_MULTIPLE_ARF
     if (cpi->multi_arf_enabled) {
       define_fixed_arf_period(cpi);
@@ -2131,7 +2130,7 @@
     }
 #endif
 
-    if (cpi->gf_zeromotion_pct > 995) {
+    if (cpi->twopass.gf_zeromotion_pct > 995) {
       // As long as max_thresh for encode breakout is small enough, it is ok
       // to enable it for no-show frame, i.e. set enable_encode_breakout to 2.
       if (!cpi->common.show_frame)
@@ -2146,7 +2145,8 @@
     // from that arf boost and it should not be given extra bits
     // If the previous group was NOT coded using arf we may want to apply
     // some boost to this GF as well
-    if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) {
+    if (cpi->rc.source_alt_ref_pending &&
+        cpi->common.frame_type != KEY_FRAME) {
       // Assign a standard frames worth of bits from those allocated
       // to the GF group
       int bak = cpi->rc.per_frame_bandwidth;
@@ -2178,7 +2178,7 @@
   if (cpi->target_bandwidth < 0)
     cpi->target_bandwidth = 0;
 
-  cpi->twopass.frames_to_key--;
+  cpi->rc.frames_to_key--;
 
   // Update the total stats remaining structure
   subtract_stats(&cpi->twopass.total_left_stats, &this_frame);
@@ -2274,6 +2274,7 @@
 
   return is_viable_kf;
 }
+
 static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   int i, j;
   FIRSTPASS_STATS last_frame;
@@ -2300,15 +2301,15 @@
   cpi->common.frame_type = KEY_FRAME;
 
   // is this a forced key frame by interval
-  cpi->this_key_frame_forced = cpi->next_key_frame_forced;
+  cpi->rc.this_key_frame_forced = cpi->rc.next_key_frame_forced;
 
   // Clear the alt ref active flag as this can never be active on a key frame
-  cpi->source_alt_ref_active = 0;
+  cpi->rc.source_alt_ref_active = 0;
 
   // Kf is always a gf so clear frames till next gf counter
   cpi->rc.frames_till_gf_update_due = 0;
 
-  cpi->twopass.frames_to_key = 1;
+  cpi->rc.frames_to_key = 1;
 
   // Take a copy of the initial frame details
   first_frame = *this_frame;
@@ -2360,14 +2361,14 @@
         break;
 
       // Step on to the next frame
-      cpi->twopass.frames_to_key++;
+      cpi->rc.frames_to_key++;
 
       // If we don't have a real key frame within the next two
       // forcekeyframeevery intervals then break out of the loop.
-      if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
+      if (cpi->rc.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
         break;
     } else {
-      cpi->twopass.frames_to_key++;
+      cpi->rc.frames_to_key++;
     }
     i++;
   }
@@ -2377,11 +2378,11 @@
   // This code centers the extra kf if the actual natural
   // interval is between 1x and 2x
   if (cpi->oxcf.auto_key
-      && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) {
+      && cpi->rc.frames_to_key > (int)cpi->key_frame_frequency) {
     FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in;
     FIRSTPASS_STATS tmp_frame;
 
-    cpi->twopass.frames_to_key /= 2;
+    cpi->rc.frames_to_key /= 2;
 
     // Copy first frame details
     tmp_frame = first_frame;
@@ -2394,7 +2395,7 @@
     kf_group_coded_err = 0;
 
     // Rescan to get the correct error data for the forced kf group
-    for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+    for (i = 0; i < cpi->rc.frames_to_key; i++) {
       // Accumulate kf group errors
       kf_group_err += calculate_modified_err(cpi, &tmp_frame);
       kf_group_intra_err += tmp_frame.intra_error;
@@ -2407,9 +2408,9 @@
     // Reset to the start of the group
     reset_fpf_position(cpi, current_pos);
 
-    cpi->next_key_frame_forced = 1;
+    cpi->rc.next_key_frame_forced = 1;
   } else {
-    cpi->next_key_frame_forced = 0;
+    cpi->rc.next_key_frame_forced = 0;
   }
   // Special case for the last frame of the file
   if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
@@ -2439,7 +2440,7 @@
                                             cpi->twopass.modified_error_left));
 
     // Clip based on maximum per frame rate defined by the user.
-    max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
+    max_grp_bits = (int64_t)max_bits * (int64_t)cpi->rc.frames_to_key;
     if (cpi->twopass.kf_group_bits > max_grp_bits)
       cpi->twopass.kf_group_bits = max_grp_bits;
   } else {
@@ -2455,7 +2456,7 @@
   loop_decay_rate = 1.00;       // Starting decay rate
 
   // Scan through the kf group collating various stats.
-  for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+  for (i = 0; i < cpi->rc.frames_to_key; i++) {
     double r;
 
     if (EOF == input_stats(cpi, &next_frame))
@@ -2498,7 +2499,7 @@
     zero_stats(&sectionstats);
     reset_fpf_position(cpi, start_position);
 
-    for (i = 0; i < cpi->twopass.frames_to_key; i++) {
+    for (i = 0; i < cpi->rc.frames_to_key; i++) {
       input_stats(cpi, &next_frame);
       accumulate_stats(&sectionstats, &next_frame);
     }
@@ -2519,8 +2520,8 @@
     int allocation_chunks;
     int alt_kf_bits;
 
-    if (kf_boost < (cpi->twopass.frames_to_key * 3))
-      kf_boost = (cpi->twopass.frames_to_key * 3);
+    if (kf_boost < (cpi->rc.frames_to_key * 3))
+      kf_boost = (cpi->rc.frames_to_key * 3);
 
     if (kf_boost < 300)  // Min KF boost
       kf_boost = 300;
@@ -2528,7 +2529,7 @@
     // Make a note of baseline boost and the zero motion
     // accumulator value for use elsewhere.
     cpi->rc.kf_boost = kf_boost;
-    cpi->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
+    cpi->twopass.kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
 
     // We do three calculations for kf size.
     // The first is based on the error score for the whole kf group.
@@ -2540,14 +2541,14 @@
     // Special case if the sequence appears almost totally static
     // In this case we want to spend almost all of the bits on the
     // key frame.
-    // cpi->twopass.frames_to_key-1 because key frame itself is taken
+    // cpi->rc.frames_to_key-1 because key frame itself is taken
     // care of by kf_boost.
     if (zero_motion_accumulator >= 0.99) {
       allocation_chunks =
-        ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost;
+        ((cpi->rc.frames_to_key - 1) * 10) + kf_boost;
     } else {
       allocation_chunks =
-        ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost;
+        ((cpi->rc.frames_to_key - 1) * 100) + kf_boost;
     }
 
     // Prevent overflow
@@ -2569,10 +2570,10 @@
     // kf group (which does sometimes happen... eg a blank intro frame)
     // Then use an alternate calculation based on the kf error score
     // which should give a smaller key frame.
-    if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) {
+    if (kf_mod_err < kf_group_err / cpi->rc.frames_to_key) {
       double  alt_kf_grp_bits =
         ((double)cpi->twopass.bits_left *
-         (kf_mod_err * (double)cpi->twopass.frames_to_key) /
+         (kf_mod_err * (double)cpi->rc.frames_to_key) /
          DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left));
 
       alt_kf_bits = (int)((double)kf_boost *
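
Reviewer's note on the vp9_firstpass.c changes above: first_pass_motion_search() and its temporaries now take plain MV pointers, so callers pass &best_ref_mv.as_mv / &zero_ref_mv.as_mv and the .as_mv indirection disappears inside the function. Most of the remaining hunks are mechanical renames moving per-group state into sub-structures (frames_to_key, source_alt_ref_pending/active and this/next_key_frame_forced into cpi->rc; gf/kf_zeromotion_pct into cpi->twopass), while a few also change behaviour: the active_max_gf_interval formula now uses last_q[INTER_FRAME], and the adjust_maxq_qrange() call is commented out. For reference, a sketch of the two motion-vector types, assumed to match vp9/common/vp9_mv.h:

#include <stdint.h>

typedef struct mv {
  int16_t row;  /* 1/8-pel units */
  int16_t col;
} MV;

/* Union wrapper: lets a vector be copied or zero-tested as one 32-bit
 * word (as_int) or addressed componentwise (as_mv). */
typedef union int_mv {
  uint32_t as_int;
  MV as_mv;
} int_mv;
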
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index e2ef256..32ab3fc 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -152,7 +152,8 @@
     xd->mi_8x8[0]->mbmi.mode = mode;
     vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
                             x->plane[0].src.buf, x->plane[0].src.stride,
-                            xd->plane[0].dst.buf, xd->plane[0].dst.stride);
+                            xd->plane[0].dst.buf, xd->plane[0].dst.stride,
+                            0, 0, 0);
     err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
                        xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err);
 
@@ -398,8 +399,7 @@
   // being a GF - so exit if we don't look ahead beyond that
   if (n_frames <= cpi->rc.frames_till_gf_update_due)
     return;
-  if (n_frames > (int)cpi->frames_till_alt_ref_frame)
-    n_frames = cpi->frames_till_alt_ref_frame;
+
   if (n_frames > MAX_LAG_BUFFERS)
     n_frames = MAX_LAG_BUFFERS;
 
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 87b5988..382ccb0 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -688,7 +688,7 @@
     if (thissad < bestsad)\
     {\
       if (use_mvcost) \
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv, \
+        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, \
                                   mvjsadcost, mvsadcost, \
                                   sad_per_bit);\
       if (thissad < bestsad)\
@@ -741,13 +741,13 @@
   int k = -1;
   int all_in;
   int best_site = -1;
-  int_mv fcenter_mv;
+  MV fcenter_mv;
   int best_init_s = search_param_to_steps[search_param];
   int *mvjsadcost = x->nmvjointsadcost;
   int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
 
-  fcenter_mv.as_mv.row = center_mv->row >> 3;
-  fcenter_mv.as_mv.col = center_mv->col >> 3;
+  fcenter_mv.row = center_mv->row >> 3;
+  fcenter_mv.col = center_mv->col >> 3;
 
   // adjust ref_mv to make sure it is within MV range
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -760,7 +760,7 @@
   this_mv.row = br;
   this_mv.col = bc;
   bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride, 0x7fffffff)
-                + mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+                + mvsad_err_cost(&this_mv, &fcenter_mv,
                                  mvjsadcost, mvsadcost, sad_per_bit);
 
   // Search all possible scales up to the search param around the center point
@@ -1212,13 +1212,13 @@
 
   uint8_t *check_here;
   int thissad;
-  int_mv fcenter_mv;
+  MV fcenter_mv;
 
   int *mvjsadcost = x->nmvjointsadcost;
   int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
 
-  fcenter_mv.as_mv.row = center_mv->row >> 3;
-  fcenter_mv.as_mv.col = center_mv->col >> 3;
+  fcenter_mv.row = center_mv->row >> 3;
+  fcenter_mv.col = center_mv->col >> 3;
 
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
   ref_row = ref_mv->row;
@@ -1234,7 +1234,7 @@
 
   // Check the starting position
   bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff)
-                + mvsad_err_cost(best_mv, &fcenter_mv.as_mv,
+                + mvsad_err_cost(best_mv, &fcenter_mv,
                                  mvjsadcost, mvsadcost, sad_per_bit);
 
   // search_param determines the length of the initial step and hence the number
@@ -1263,7 +1263,7 @@
         if (thissad < bestsad) {
           this_mv.row = this_row_offset;
           this_mv.col = this_col_offset;
-          thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+          thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                     mvjsadcost, mvsadcost, sad_per_bit);
 
           if (thissad < bestsad) {
@@ -1295,7 +1295,7 @@
           if (thissad < bestsad) {
             this_mv.row = this_row_offset;
             this_mv.col = this_col_offset;
-            thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+            thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                       mvjsadcost, mvsadcost, sad_per_bit);
             if (thissad < bestsad) {
               bestsad = thissad;
@@ -1356,13 +1356,13 @@
 
   uint8_t *check_here;
   unsigned int thissad;
-  int_mv fcenter_mv;
+  MV fcenter_mv;
 
   int *mvjsadcost = x->nmvjointsadcost;
   int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
 
-  fcenter_mv.as_mv.row = center_mv->row >> 3;
-  fcenter_mv.as_mv.col = center_mv->col >> 3;
+  fcenter_mv.row = center_mv->row >> 3;
+  fcenter_mv.col = center_mv->col >> 3;
 
   clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
   ref_row = ref_mv->row;
@@ -1378,7 +1378,7 @@
 
   // Check the starting position
   bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff)
-                + mvsad_err_cost(best_mv, &fcenter_mv.as_mv,
+                + mvsad_err_cost(best_mv, &fcenter_mv,
                                  mvjsadcost, mvsadcost, sad_per_bit);
 
   // search_param determines the length of the initial step and hence the number
@@ -1420,7 +1420,7 @@
           if (sad_array[t] < bestsad) {
             this_mv.row = best_mv->row + ss[i].mv.row;
             this_mv.col = best_mv->col + ss[i].mv.col;
-            sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+            sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
                                            mvjsadcost, mvsadcost, sad_per_bit);
 
             if (sad_array[t] < bestsad) {
@@ -1447,7 +1447,7 @@
           if (thissad < bestsad) {
             this_mv.row = this_row_offset;
             this_mv.col = this_col_offset;
-            thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+            thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                       mvjsadcost, mvsadcost, sad_per_bit);
 
             if (thissad < bestsad) {
@@ -1478,7 +1478,7 @@
           if (thissad < bestsad) {
             this_mv.row = this_row_offset;
             this_mv.col = this_col_offset;
-            thissad += mvsad_err_cost(&this_mv, &fcenter_mv.as_mv,
+            thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                       mvjsadcost, mvsadcost, sad_per_bit);
             if (thissad < bestsad) {
               bestsad = thissad;
@@ -1585,7 +1585,7 @@
   int in_what_stride = xd->plane[0].pre[0].stride;
   int mv_stride = xd->plane[0].pre[0].stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+  MV *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0].as_mv;
   MV this_mv;
   int bestsad = INT_MAX;
   int r, c;
@@ -1612,13 +1612,13 @@
   in_what = xd->plane[0].pre[0].buf;
   bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
 
-  best_mv->as_mv.row = ref_row;
-  best_mv->as_mv.col = ref_col;
+  best_mv->row = ref_row;
+  best_mv->col = ref_col;
 
   // Baseline value at the centre
   bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
                         in_what_stride, 0x7fffffff)
-                           + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv,
+                           + mvsad_err_cost(best_mv, &fcenter_mv,
                                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
@@ -1642,8 +1642,8 @@
 
       if (thissad < bestsad) {
         bestsad = thissad;
-        best_mv->as_mv.row = r;
-        best_mv->as_mv.col = c;
+        best_mv->row = r;
+        best_mv->col = c;
         bestaddress = check_here;
       }
 
@@ -1651,8 +1651,8 @@
     }
   }
 
-  this_mv.row = best_mv->as_mv.row * 8;
-  this_mv.col = best_mv->as_mv.col * 8;
+  this_mv.row = best_mv->row * 8;
+  this_mv.col = best_mv->col * 8;
 
   if (bestsad < INT_MAX)
     return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
@@ -1674,7 +1674,7 @@
   int in_what_stride = xd->plane[0].pre[0].stride;
   int mv_stride = xd->plane[0].pre[0].stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+  MV *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0].as_mv;
   MV this_mv;
   unsigned int bestsad = INT_MAX;
   int r, c;
@@ -1703,13 +1703,13 @@
   in_what = xd->plane[0].pre[0].buf;
   bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
 
-  best_mv->as_mv.row = ref_row;
-  best_mv->as_mv.col = ref_col;
+  best_mv->row = ref_row;
+  best_mv->col = ref_col;
 
   // Baseline value at the centre
   bestsad = fn_ptr->sdf(what, what_stride,
                         bestaddress, in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv,
+            + mvsad_err_cost(best_mv, &fcenter_mv,
                              mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
@@ -1739,8 +1739,8 @@
 
           if (thissad < bestsad) {
             bestsad = thissad;
-            best_mv->as_mv.row = r;
-            best_mv->as_mv.col = c;
+            best_mv->row = r;
+            best_mv->col = c;
             bestaddress = check_here;
           }
         }
@@ -1761,8 +1761,8 @@
 
         if (thissad < bestsad) {
           bestsad = thissad;
-          best_mv->as_mv.row = r;
-          best_mv->as_mv.col = c;
+          best_mv->row = r;
+          best_mv->col = c;
           bestaddress = check_here;
         }
       }
@@ -1772,8 +1772,8 @@
     }
   }
 
-  this_mv.row = best_mv->as_mv.row * 8;
-  this_mv.col = best_mv->as_mv.col * 8;
+  this_mv.row = best_mv->row * 8;
+  this_mv.col = best_mv->col * 8;
 
   if (bestsad < INT_MAX)
     return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
@@ -1796,7 +1796,7 @@
   int in_what_stride = xd->plane[0].pre[0].stride;
   int mv_stride = xd->plane[0].pre[0].stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+  MV *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0].as_mv;
   MV this_mv;
   unsigned int bestsad = INT_MAX;
   int r, c;
@@ -1826,13 +1826,13 @@
   in_what = xd->plane[0].pre[0].buf;
   bestaddress = in_what + (ref_row * xd->plane[0].pre[0].stride) + ref_col;
 
-  best_mv->as_mv.row = ref_row;
-  best_mv->as_mv.col = ref_col;
+  best_mv->row = ref_row;
+  best_mv->col = ref_col;
 
-  // Baseline value at the centre
+  // Baseline value at the center
   bestsad = fn_ptr->sdf(what, what_stride,
                         bestaddress, in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv,
+            + mvsad_err_cost(best_mv, &fcenter_mv,
                              mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
@@ -1862,8 +1862,8 @@
 
           if (thissad < bestsad) {
             bestsad = thissad;
-            best_mv->as_mv.row = r;
-            best_mv->as_mv.col = c;
+            best_mv->row = r;
+            best_mv->col = c;
             bestaddress = check_here;
           }
         }
@@ -1888,8 +1888,8 @@
 
           if (thissad < bestsad) {
             bestsad = thissad;
-            best_mv->as_mv.row = r;
-            best_mv->as_mv.col = c;
+            best_mv->row = r;
+            best_mv->col = c;
             bestaddress = check_here;
           }
         }
@@ -1910,8 +1910,8 @@
 
         if (thissad < bestsad) {
           bestsad = thissad;
-          best_mv->as_mv.row = r;
-          best_mv->as_mv.col = c;
+          best_mv->row = r;
+          best_mv->col = c;
           bestaddress = check_here;
         }
       }
@@ -1921,8 +1921,8 @@
     }
   }
 
-  this_mv.row = best_mv->as_mv.row * 8;
-  this_mv.col = best_mv->as_mv.col * 8;
+  this_mv.row = best_mv->row * 8;
+  this_mv.col = best_mv->col * 8;
 
   if (bestsad < INT_MAX)
     return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
@@ -2136,9 +2136,9 @@
  * mode.
  */
 int vp9_refining_search_8p_c(MACROBLOCK *x,
-                             int_mv *ref_mv, int error_per_bit,
+                             MV *ref_mv, int error_per_bit,
                              int search_range, vp9_variance_fn_ptr_t *fn_ptr,
-                             int *mvjcost, int *mvcost[2], int_mv *center_mv,
+                             int *mvjcost, int *mvcost[2], const MV *center_mv,
                              const uint8_t *second_pred, int w, int h) {
   const MACROBLOCKD* const xd = &x->e_mbd;
   MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
@@ -2150,32 +2150,32 @@
   int in_what_stride = xd->plane[0].pre[0].stride;
   uint8_t *what = x->plane[0].src.buf;
   uint8_t *best_address = xd->plane[0].pre[0].buf +
-                          (ref_mv->as_mv.row * xd->plane[0].pre[0].stride) +
-                          ref_mv->as_mv.col;
+                          (ref_mv->row * xd->plane[0].pre[0].stride) +
+                          ref_mv->col;
   uint8_t *check_here;
   unsigned int thissad;
-  int_mv this_mv;
+  MV this_mv;
   unsigned int bestsad = INT_MAX;
-  int_mv fcenter_mv;
+  MV fcenter_mv;
 
   int *mvjsadcost = x->nmvjointsadcost;
   int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
 
-  fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
-  fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
+  fcenter_mv.row = center_mv->row >> 3;
+  fcenter_mv.col = center_mv->col >> 3;
 
   /* Get compound pred by averaging two pred blocks. */
   bestsad = fn_ptr->sdaf(what, what_stride, best_address, in_what_stride,
                          second_pred, 0x7fffffff) +
-      mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv,
+      mvsad_err_cost(ref_mv, &fcenter_mv,
                      mvjsadcost, mvsadcost, error_per_bit);
 
   for (i = 0; i < search_range; i++) {
     int best_site = -1;
 
     for (j = 0; j < 8; j++) {
-      this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
-      this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
+      this_row_offset = ref_mv->row + neighbors[j].row;
+      this_col_offset = ref_mv->col + neighbors[j].col;
 
       if ((this_col_offset > x->mv_col_min) &&
           (this_col_offset < x->mv_col_max) &&
@@ -2189,9 +2189,9 @@
                                second_pred, bestsad);
 
         if (thissad < bestsad) {
-          this_mv.as_mv.row = this_row_offset;
-          this_mv.as_mv.col = this_col_offset;
-          thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+          this_mv.row = this_row_offset;
+          this_mv.col = this_col_offset;
+          thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                     mvjsadcost, mvsadcost, error_per_bit);
           if (thissad < bestsad) {
             bestsad = thissad;
@@ -2204,22 +2204,22 @@
     if (best_site == -1) {
       break;
     } else {
-      ref_mv->as_mv.row += neighbors[best_site].row;
-      ref_mv->as_mv.col += neighbors[best_site].col;
+      ref_mv->row += neighbors[best_site].row;
+      ref_mv->col += neighbors[best_site].col;
       best_address += (neighbors[best_site].row) * in_what_stride +
           neighbors[best_site].col;
     }
   }
 
-  this_mv.as_mv.row = ref_mv->as_mv.row * 8;
-  this_mv.as_mv.col = ref_mv->as_mv.col * 8;
+  this_mv.row = ref_mv->row * 8;
+  this_mv.col = ref_mv->col * 8;
 
   if (bestsad < INT_MAX) {
     // FIXME(rbultje, yunqing): add full-pixel averaging variance functions
     // so we don't have to use the subpixel with xoff=0,yoff=0 here.
     return fn_ptr->svaf(best_address, in_what_stride, 0, 0, what, what_stride,
                         (unsigned int *)(&thissad), second_pred) +
-                        mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                        mv_err_cost(&this_mv, center_mv,
                                     mvjcost, mvcost, x->errorperbit);
   } else {
     return INT_MAX;
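
The motion-search hunks above repeatedly convert between the two MV resolutions: the 1/8-pel center_mv is shifted down by 3 before it feeds the full-pel SAD cost (fcenter_mv), and the winning full-pel best_mv is scaled back up by 8 before the final variance/mv_err_cost call. A minimal standalone sketch of that unit handling follows; the MV layout mirrors vp9/common/vp9_mv.h and the helper names are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the libvpx MV struct (row/col pair). */
typedef struct { int16_t row, col; } MV;

/* Full-pel SAD search costs candidates against a full-pel center,
 * hence "fcenter_mv.row = center_mv->row >> 3" in the hunks above. */
static MV to_fullpel(MV subpel_mv) {
  MV m = { (int16_t)(subpel_mv.row >> 3), (int16_t)(subpel_mv.col >> 3) };
  return m;
}

/* The chosen full-pel MV is promoted back to 1/8-pel units before
 * mv_err_cost()/variance, as in "this_mv.row = best_mv->row * 8". */
static MV to_subpel(MV fullpel_mv) {
  MV m = { (int16_t)(fullpel_mv.row * 8), (int16_t)(fullpel_mv.col * 8) };
  return m;
}

int main(void) {
  MV center = { 16, -24 };            /* 1/8-pel units: 2 px down, 3 px left */
  MV fcenter = to_fullpel(center);    /* (2, -3) in full-pel units           */
  MV best = { 2, -3 };                /* full-pel result of the search       */
  MV out = to_subpel(best);           /* (16, -24) back in 1/8-pel units     */
  printf("fcenter=(%d,%d) out=(%d,%d)\n", fcenter.row, fcenter.col, out.row, out.col);
  return 0;
}
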
diff --git a/vp9/encoder/vp9_mcomp.h b/vp9/encoder/vp9_mcomp.h
index c574e61..f9d1f90 100644
--- a/vp9/encoder/vp9_mcomp.h
+++ b/vp9/encoder/vp9_mcomp.h
@@ -124,9 +124,9 @@
                                        const MV *center_mv);
 
 int vp9_refining_search_8p_c(MACROBLOCK *x,
-                             int_mv *ref_mv, int error_per_bit,
+                             MV *ref_mv, int error_per_bit,
                              int search_range, vp9_variance_fn_ptr_t *fn_ptr,
                              int *mvjcost, int *mvcost[2],
-                             int_mv *center_mv, const uint8_t *second_pred,
+                             const MV *center_mv, const uint8_t *second_pred,
                              int w, int h);
 #endif  // VP9_ENCODER_VP9_MCOMP_H_
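
Switching vp9_refining_search_8p_c() from int_mv* to MV* / const MV* leaves the stored data untouched because int_mv is a union wrapping an MV; call sites simply pass &mv.as_mv instead of &mv, as the vp9_rdopt.c hunk further below does for tmp_mv and ref_mv[id]. A self-contained sketch of that relationship; the type definitions are copied in the spirit of vp9/common/vp9_mv.h, and the exact field widths should be treated as an assumption.

#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t row, col; } MV;               /* plain motion vector      */
typedef union { uint32_t as_int; MV as_mv; } int_mv;   /* packed view of same bits */

/* New-style interface: the search routine now takes the plain MV directly. */
static void print_mv(const MV *mv) {
  printf("row=%d col=%d\n", mv->row, mv->col);
}

int main(void) {
  int_mv tmp_mv;
  tmp_mv.as_mv.row = -4;
  tmp_mv.as_mv.col = 7;
  print_mv(&tmp_mv.as_mv);            /* was: pass &tmp_mv, now: pass &tmp_mv.as_mv */
  printf("packed=0x%08x\n", (unsigned)tmp_mv.as_int);  /* as_int still usable for fast compares */
  return 0;
}
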
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 3ca8af3..5bbd42b 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -302,7 +302,7 @@
 
   if (cm->frame_type == KEY_FRAME ||
       cpi->refresh_alt_ref_frame ||
-      (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)) {
+      (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
     // Clear down the segment map
     vpx_memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
 
@@ -329,7 +329,6 @@
     }
   }
 }
-
 static void configure_static_seg_features(VP9_COMP *cpi) {
   VP9_COMMON *cm = &cpi->common;
   struct segmentation *seg = &cm->seg;
@@ -389,7 +388,7 @@
     // First normal frame in a valid gf or alt ref group
     if (cpi->rc.frames_since_golden == 0) {
       // Set up segment features for normal frames in an arf group
-      if (cpi->source_alt_ref_active) {
+      if (cpi->rc.source_alt_ref_active) {
         seg->update_map = 0;
         seg->update_data = 1;
         seg->abs_delta = SEGMENT_DELTADATA;
@@ -421,7 +420,7 @@
 
         vp9_clearall_segfeatures(seg);
       }
-    } else if (cpi->is_src_frame_alt_ref) {
+    } else if (cpi->rc.is_src_frame_alt_ref) {
       // Special case where we are coding over the top of a previous
       // alt ref frame.
       // Segment coding disabled for compred testing
@@ -1048,6 +1047,7 @@
 
   return 63;
 };
+
 void vp9_new_framerate(VP9_COMP *cpi, double framerate) {
   if (framerate < 0.1)
     framerate = 30;
@@ -1124,7 +1124,15 @@
   // Initialize active best and worst q and average q values.
   cpi->rc.active_worst_quality      = cpi->oxcf.worst_allowed_q;
 
-  cpi->rc.avg_frame_qindex          = cpi->oxcf.worst_allowed_q;
+  cpi->rc.avg_frame_qindex[0]       = (cpi->oxcf.worst_allowed_q +
+                                       cpi->oxcf.best_allowed_q) / 2;
+  cpi->rc.avg_frame_qindex[1]       = (cpi->oxcf.worst_allowed_q +
+                                       cpi->oxcf.best_allowed_q) / 2;
+  cpi->rc.avg_frame_qindex[2]       = (cpi->oxcf.worst_allowed_q +
+                                       cpi->oxcf.best_allowed_q) / 2;
+  cpi->rc.last_q[0]                 = cpi->oxcf.best_allowed_q;
+  cpi->rc.last_q[1]                 = cpi->oxcf.best_allowed_q;
+  cpi->rc.last_q[2]                 = cpi->oxcf.best_allowed_q;
 
   // Initialise the starting buffer levels
   cpi->rc.buffer_level              = cpi->oxcf.starting_buffer_level;
@@ -1288,6 +1296,7 @@
   if (cpi->oxcf.fixed_q >= 0) {
     cpi->rc.last_q[0] = cpi->oxcf.fixed_q;
     cpi->rc.last_q[1] = cpi->oxcf.fixed_q;
+    cpi->rc.last_q[2] = cpi->oxcf.fixed_q;
     cpi->rc.last_boosted_qindex = cpi->oxcf.fixed_q;
   }
 
@@ -1307,7 +1316,7 @@
 #else
   cpi->alt_ref_source = NULL;
 #endif
-  cpi->is_src_frame_alt_ref = 0;
+  cpi->rc.is_src_frame_alt_ref = 0;
 
 #if 0
   // Experimental RD Code
@@ -1567,14 +1576,14 @@
 
   /*Initialize the feed-forward activity masking.*/
   cpi->activity_avg = 90 << 12;
-
-  cpi->frames_since_key = 8;  // Sensible default for first frame.
   cpi->key_frame_frequency = cpi->oxcf.key_freq;
-  cpi->this_key_frame_forced = 0;
-  cpi->next_key_frame_forced = 0;
 
-  cpi->source_alt_ref_pending = 0;
-  cpi->source_alt_ref_active = 0;
+  cpi->rc.frames_since_key = 8;  // Sensible default for first frame.
+  cpi->rc.this_key_frame_forced = 0;
+  cpi->rc.next_key_frame_forced = 0;
+
+  cpi->rc.source_alt_ref_pending = 0;
+  cpi->rc.source_alt_ref_active = 0;
   cpi->refresh_alt_ref_frame = 0;
 
 #if CONFIG_MULTIPLE_ARF
@@ -1627,7 +1636,6 @@
   cpi->first_time_stamp_ever = INT64_MAX;
 
   cpi->rc.frames_till_gf_update_due      = 0;
-  cpi->rc.key_frame_count              = 1;
 
   cpi->rc.ni_av_qi                     = cpi->oxcf.worst_allowed_q;
   cpi->rc.ni_tot_qi                    = 0;
@@ -1652,9 +1660,6 @@
   cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
   cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
 
-  for (i = 0; i < KEY_FRAME_CONTEXT; i++)
-    cpi->rc.prior_key_frame_distance[i] = (int)cpi->output_framerate;
-
 #ifdef OUTPUT_YUV_SRC
   yuv_file = fopen("bd.yuv", "ab");
 #endif
@@ -2309,11 +2314,12 @@
   if (!cpi->multi_arf_enabled)
 #endif
     // Clear the alternate reference update pending flag.
-    cpi->source_alt_ref_pending = 0;
+    cpi->rc.source_alt_ref_pending = 0;
 
   // Set the alternate reference frame active flag
-  cpi->source_alt_ref_active = 1;
+  cpi->rc.source_alt_ref_active = 1;
 }
+
 static void update_golden_frame_stats(VP9_COMP *cpi) {
   // Update the Golden frame usage counts.
   if (cpi->refresh_golden_frame) {
@@ -2326,7 +2332,7 @@
     // set a flag to say so.
     if (cpi->oxcf.fixed_q >= 0 &&
         cpi->oxcf.play_alternate && !cpi->refresh_alt_ref_frame) {
-      cpi->source_alt_ref_pending = 1;
+      cpi->rc.source_alt_ref_pending = 1;
       cpi->rc.frames_till_gf_update_due = cpi->rc.baseline_gf_interval;
 
       // TODO(ivan): For SVC encoder, GF automatic update is disabled by using
@@ -2336,8 +2342,8 @@
       }
     }
 
-    if (!cpi->source_alt_ref_pending)
-      cpi->source_alt_ref_active = 0;
+    if (!cpi->rc.source_alt_ref_pending)
+      cpi->rc.source_alt_ref_active = 0;
 
     // Decrement count down till next gf
     if (cpi->rc.frames_till_gf_update_due > 0)
@@ -2348,9 +2354,6 @@
     if (cpi->rc.frames_till_gf_update_due > 0)
       cpi->rc.frames_till_gf_update_due--;
 
-    if (cpi->frames_till_alt_ref_frame)
-      cpi->frames_till_alt_ref_frame--;
-
     cpi->rc.frames_since_golden++;
   }
 }
@@ -2642,7 +2645,7 @@
         (double)cpi->twopass.bits_left /
             (1 + cpi->twopass.total_left_stats.coded_error),
         cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost,
-        cpi->kf_zeromotion_pct);
+        cpi->twopass.kf_zeromotion_pct);
 
   fclose(f);
 
@@ -2736,7 +2739,7 @@
       loop = 0;
     } else {
       // Special case handling for forced key frames
-      if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+      if ((cm->frame_type == KEY_FRAME) && cpi->rc.this_key_frame_forced) {
         int last_q = *q;
         int kf_err = vp9_calc_ss_err(cpi->Source, get_frame_new_buffer(cm));
 
@@ -2850,7 +2853,7 @@
       }
     }
 
-    if (cpi->is_src_frame_alt_ref)
+    if (cpi->rc.is_src_frame_alt_ref)
       loop = 0;
 
     if (loop) {
@@ -2873,7 +2876,6 @@
   int frame_over_shoot_limit;
   int frame_under_shoot_limit;
   int top_index;
-  int top_index_prop;
   int bottom_index;
 
   SPEED_FEATURES *const sf = &cpi->sf;
@@ -2912,14 +2914,14 @@
   cpi->zbin_mode_boost_enabled = 0;
 
   // Current default encoder behavior for the altref sign bias.
-  cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->source_alt_ref_active;
+  cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = cpi->rc.source_alt_ref_active;
 
   // Check to see if a key frame is signaled.
   // For two pass with auto key frame enabled cm->frame_type may already be
   // set, but not for one pass.
   if ((cm->current_video_frame == 0) ||
       (cm->frame_flags & FRAMEFLAGS_KEY) ||
-      (cpi->oxcf.auto_key && (cpi->frames_since_key %
+      (cpi->oxcf.auto_key && (cpi->rc.frames_since_key %
                               cpi->key_frame_frequency == 0))) {
     // Set frame type to key frame for the force key frame, if we exceed the
     // maximum distance in an automatic keyframe selection or for the first
@@ -2962,7 +2964,7 @@
     }
 
     // The alternate reference frame cannot be active for a key frame.
-    cpi->source_alt_ref_active = 0;
+    cpi->rc.source_alt_ref_active = 0;
 
     cm->error_resilient_mode = (cpi->oxcf.error_resilient_mode != 0);
     cm->frame_parallel_decoding_mode =
@@ -3029,8 +3031,7 @@
   // Decide q and q bounds
   q = vp9_rc_pick_q_and_adjust_q_bounds(cpi,
                                         &bottom_index,
-                                        &top_index,
-                                        &top_index_prop);
+                                        &top_index);
 
   if (!frame_is_intra_only(cm)) {
     cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
@@ -3050,7 +3051,7 @@
   // Special case code to reduce pulsing when key frames are forced at a
   // fixed interval. Note the reconstruction error if it is the frame before
   // the force key frame
-  if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
+  if (cpi->rc.next_key_frame_forced && (cpi->rc.frames_to_key == 0)) {
     cpi->ambient_err = vp9_calc_ss_err(cpi->Source, get_frame_new_buffer(cm));
   }
 
@@ -3107,7 +3108,6 @@
     vp9_copy(counts->comp_inter, cpi->comp_inter_count);
     vp9_copy(counts->single_ref, cpi->single_ref_count);
     vp9_copy(counts->comp_ref, cpi->comp_ref_count);
-    counts->mv = cpi->NMVcount;
     if (!cpi->common.error_resilient_mode &&
         !cpi->common.frame_parallel_decoding_mode) {
       vp9_adapt_mode_probs(&cpi->common);
@@ -3123,7 +3123,7 @@
    * needed in motion search besides loopfilter */
   cm->last_frame_type = cm->frame_type;
 
-  vp9_rc_postencode_update(cpi, *size, top_index_prop);
+  vp9_rc_postencode_update(cpi, *size);
 
 #if 0
   output_frame_level_debug_stats(cpi);
@@ -3188,6 +3188,8 @@
 
     // As this frame is a key frame the next defaults to an inter frame.
     cm->frame_type = INTER_FRAME;
+    vp9_clear_system_state();
+    cpi->rc.frames_since_key = 0;
   } else {
     *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY;
 
@@ -3237,7 +3239,7 @@
     // Don't increment frame counters if this was an altref buffer
     // update not a real frame
     ++cm->current_video_frame;
-    ++cpi->frames_since_key;
+    ++cpi->rc.frames_since_key;
   }
   // restore prev_mi
   cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
@@ -3344,7 +3346,7 @@
   set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
 
   // Should we code an alternate reference frame.
-  if (cpi->oxcf.play_alternate && cpi->source_alt_ref_pending) {
+  if (cpi->oxcf.play_alternate && cpi->rc.source_alt_ref_pending) {
     int frames_to_arf;
 
 #if CONFIG_MULTIPLE_ARF
@@ -3358,7 +3360,7 @@
 #endif
       frames_to_arf = cpi->rc.frames_till_gf_update_due;
 
-    assert(frames_to_arf < cpi->twopass.frames_to_key);
+    assert(frames_to_arf < cpi->rc.frames_to_key);
 
     if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, frames_to_arf))) {
 #if CONFIG_MULTIPLE_ARF
@@ -3382,15 +3384,12 @@
       cpi->refresh_alt_ref_frame = 1;
       cpi->refresh_golden_frame = 0;
       cpi->refresh_last_frame = 0;
-      cpi->is_src_frame_alt_ref = 0;
-
-      // TODO(agrange) This needs to vary depending on where the next ARF is.
-      cpi->frames_till_alt_ref_frame = frames_to_arf;
+      cpi->rc.is_src_frame_alt_ref = 0;
 
 #if CONFIG_MULTIPLE_ARF
       if (!cpi->multi_arf_enabled)
 #endif
-        cpi->source_alt_ref_pending = 0;   // Clear Pending altf Ref flag.
+        cpi->rc.source_alt_ref_pending = 0;   // Clear pending alt ref flag.
     }
   }
 
@@ -3404,19 +3403,19 @@
 
 #if CONFIG_MULTIPLE_ARF
       // Is this frame the ARF overlay.
-      cpi->is_src_frame_alt_ref = 0;
+      cpi->rc.is_src_frame_alt_ref = 0;
       for (i = 0; i < cpi->arf_buffered; ++i) {
         if (cpi->source == cpi->alt_ref_source[i]) {
-          cpi->is_src_frame_alt_ref = 1;
+          cpi->rc.is_src_frame_alt_ref = 1;
           cpi->refresh_golden_frame = 1;
           break;
         }
       }
 #else
-      cpi->is_src_frame_alt_ref = cpi->alt_ref_source
-                                  && (cpi->source == cpi->alt_ref_source);
+      cpi->rc.is_src_frame_alt_ref = cpi->alt_ref_source
+          && (cpi->source == cpi->alt_ref_source);
 #endif
-      if (cpi->is_src_frame_alt_ref) {
+      if (cpi->rc.is_src_frame_alt_ref) {
         // Current frame is an ARF overlay frame.
 #if CONFIG_MULTIPLE_ARF
         cpi->alt_ref_source[i] = NULL;
@@ -3454,7 +3453,7 @@
 
 #if CONFIG_MULTIPLE_ARF
     if ((cm->frame_type != KEY_FRAME) && (cpi->pass == 2))
-      cpi->source_alt_ref_pending = is_next_frame_arf(cpi);
+      cpi->rc.source_alt_ref_pending = is_next_frame_arf(cpi);
 #endif
   } else {
     *size = 0;
@@ -3547,7 +3546,7 @@
         cm->active_ref_idx[0], cm->active_ref_idx[1], cm->active_ref_idx[2]);
     if (cpi->refresh_alt_ref_frame)
       fprintf(fp_out, "  type:ARF");
-    if (cpi->is_src_frame_alt_ref)
+    if (cpi->rc.is_src_frame_alt_ref)
       fprintf(fp_out, "  type:OVERLAY[%d]", cpi->alt_fb_idx);
     fprintf(fp_out, "\n");
   }
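
Most of the vp9_onyx_if.c changes are mechanical: the ARF and key-frame bookkeeping (is_src_frame_alt_ref, source_alt_ref_pending/active, frames_since_key, this/next_key_frame_forced) moves from VP9_COMP into the rate-control state cpi->rc, and the single avg_frame_qindex becomes a three-slot array seeded at the midpoint of the allowed q range. The repeated initializations above are equivalent to the loop below, a sketch using stand-in locals rather than the real libvpx fields.

#include <stdio.h>

int main(void) {
  /* Stand-ins for cpi->oxcf.worst_allowed_q / best_allowed_q (example values)
   * and the three-slot rc arrays: index 0 = KEY, 1 = INTER, 2 = ARF/GF. */
  const int worst_allowed_q = 200, best_allowed_q = 4;
  int avg_frame_qindex[3], last_q[3];
  int i;

  for (i = 0; i < 3; i++) {
    avg_frame_qindex[i] = (worst_allowed_q + best_allowed_q) / 2;
    last_q[i] = best_allowed_q;
  }
  printf("seed avg qindex = %d, seed last_q = %d\n",
         avg_frame_qindex[0], last_q[0]);
  return 0;
}
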
diff --git a/vp9/encoder/vp9_onyx_int.h b/vp9/encoder/vp9_onyx_int.h
index 8f2ffc9..2d7cd01 100644
--- a/vp9/encoder/vp9_onyx_int.h
+++ b/vp9/encoder/vp9_onyx_int.h
@@ -294,7 +294,7 @@
   int this_frame_target;
   int projected_frame_size;
   int sb64_target_rate;
-  int last_q[2];                   // Separate values for Intra/Inter
+  int last_q[3];                   // Separate values for Intra/Inter/ARF-GF
   int last_boosted_qindex;         // Last boosted GF/KF/ARF q
 
   int gfu_boost;
@@ -306,13 +306,17 @@
   double gf_rate_correction_factor;
 
   unsigned int frames_since_golden;
-  int frames_till_gf_update_due;  // Count down till next GF
+  unsigned int frames_till_gf_update_due;  // Count down till next GF
+  unsigned int max_gf_interval;
+  unsigned int baseline_gf_interval;
+  unsigned int frames_to_key;
+  unsigned int frames_since_key;
+  unsigned int this_key_frame_forced;
+  unsigned int next_key_frame_forced;
+  unsigned int source_alt_ref_pending;
+  unsigned int source_alt_ref_active;
+  unsigned int is_src_frame_alt_ref;
 
-  int max_gf_interval;
-  int baseline_gf_interval;
-
-  int64_t key_frame_count;
-  int prior_key_frame_distance[KEY_FRAME_CONTEXT];
   int per_frame_bandwidth;  // Current section per frame bandwidth target
   int av_per_frame_bandwidth;  // Average frame size target for clip
   int min_frame_bandwidth;  // Minimum allocation used for any frame
@@ -320,7 +324,7 @@
   int ni_av_qi;
   int ni_tot_qi;
   int ni_frames;
-  int avg_frame_qindex;
+  int avg_frame_qindex[3];  // 0 - KEY, 1 - INTER, 2 - ARF/GF
   double tot_q;
   double avg_q;
 
@@ -376,11 +380,7 @@
   YV12_BUFFER_CONFIG *un_scaled_source;
   YV12_BUFFER_CONFIG scaled_source;
 
-  unsigned int frames_till_alt_ref_frame;
-  int source_alt_ref_pending;
-  int source_alt_ref_active;
-
-  int is_src_frame_alt_ref;
+  unsigned int key_frame_frequency;
 
   int gold_is_last;  // gold same as last frame ( short circuit gold searches)
   int alt_is_last;  // Alt same as last ( short circuit altref search)
@@ -405,11 +405,6 @@
   TOKENEXTRA *tok;
   unsigned int tok_count[4][1 << 6];
 
-
-  unsigned int frames_since_key;
-  unsigned int key_frame_frequency;
-  unsigned int this_key_frame_forced;
-  unsigned int next_key_frame_forced;
 #if CONFIG_MULTIPLE_ARF
   // Position within a frame coding order (including any additional ARF frames).
   unsigned int sequence_number;
@@ -468,15 +463,10 @@
   int y_mode_count[4][INTRA_MODES];
   int y_uv_mode_count[INTRA_MODES][INTRA_MODES];
 
-  nmv_context_counts NMVcount;
-
   vp9_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
   vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES];
   vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];
 
-  int kf_zeromotion_pct;
-  int gf_zeromotion_pct;
-
   int64_t target_bandwidth;
   struct vpx_codec_pkt_list  *output_pkt_list;
 
@@ -549,7 +539,6 @@
     double modified_error_left;
     double kf_intra_err_min;
     double gf_intra_err_min;
-    int frames_to_key;
     int maxq_max_limit;
     int maxq_min_limit;
     int static_scene_max_gf_interval;
@@ -570,6 +559,9 @@
     int alt_extra_bits;
 
     int sr_update_lag;
+
+    int kf_zeromotion_pct;
+    int gf_zeromotion_pct;
   } twopass;
 
   YV12_BUFFER_CONFIG alt_ref_buffer;
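
last_q[] and avg_frame_qindex[] now carry one slot per frame class rather than one per FRAME_TYPE. The indexing convention used by vp9_rc_postencode_update() in the ratectrl.c hunks below can be summarised by a small helper; the rc_frame_class name and its flag parameters are hypothetical, introduced only to spell out the branching.

/* Returns the slot used by last_q[] / avg_frame_qindex[]:
 * 0 = key frame, 2 = non-overlay golden or alt-ref update, 1 = ordinary inter
 * frame, mirroring the branches in vp9_rc_postencode_update(). */
static int rc_frame_class(int is_key_frame, int refresh_golden_frame,
                          int refresh_alt_ref_frame, int is_src_frame_alt_ref) {
  if (is_key_frame)
    return 0;                                       /* KEY slot    */
  if (!is_src_frame_alt_ref &&
      (refresh_golden_frame || refresh_alt_ref_frame))
    return 2;                                       /* ARF/GF slot */
  return 1;                                         /* INTER slot  */
}
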
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 3fa8cea..9ea0c3d 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -34,9 +34,6 @@
 // Bits Per MB at different Q (Multiplied by 512)
 #define BPER_MB_NORMBITS    9
 
-static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] =
-    { 1, 2, 3, 4, 5 };
-
 // Tables relating active max Q to active min Q
 static int kf_low_motion_minq[QINDEX_RANGE];
 static int kf_high_motion_minq[QINDEX_RANGE];
@@ -275,7 +272,7 @@
     // If we are using alternate ref instead of gf then do not apply the boost
     // It will instead be applied to the altref update
     // Jim's modified boost
-    if (!cpi->source_alt_ref_active) {
+    if (!cpi->rc.source_alt_ref_active) {
       // The spend on the GF is defined in the two pass code
       // for two pass encodes
       cpi->rc.this_frame_target = cpi->rc.per_frame_bandwidth;
@@ -443,8 +440,7 @@
 
 int vp9_rc_pick_q_and_adjust_q_bounds(const VP9_COMP *cpi,
                                       int *bottom_index,
-                                      int *top_index,
-                                      int *top_index_prop) {
+                                      int *top_index) {
   const VP9_COMMON *const cm = &cpi->common;
   int active_best_quality;
   int active_worst_quality = cpi->rc.active_worst_quality;
@@ -456,7 +452,7 @@
     // Handle the special case for key frames forced when we have reached
     // the maximum key frame interval. Here force the Q to a range
     // based on the ambient Q to reduce the risk of popping.
-    if (cpi->this_key_frame_forced) {
+    if (cpi->rc.this_key_frame_forced) {
       int delta_qindex;
       int qindex = cpi->rc.last_boosted_qindex;
       double last_boosted_q = vp9_convert_qindex_to_q(qindex);
@@ -483,7 +479,7 @@
       }
 
       // Make a further adjustment based on the kf zero motion measure.
-      q_adj_factor += 0.05 - (0.001 * (double)cpi->kf_zeromotion_pct);
+      q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct);
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
@@ -498,15 +494,15 @@
     active_best_quality = active_worst_quality
         + vp9_compute_qdelta(cpi, current_q, current_q * 0.3);
 #endif
-  } else if (!cpi->is_src_frame_alt_ref &&
+  } else if (!cpi->rc.is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
 
     // Use the lower of active_worst_quality and recent
     // average Q as basis for GF/ARF best Q limit unless last frame was
     // a key frame.
-    if (cpi->frames_since_key > 1 &&
-        cpi->rc.avg_frame_qindex < active_worst_quality) {
-      q = cpi->rc.avg_frame_qindex;
+    if (cpi->rc.frames_since_key > 1 &&
+        cpi->rc.avg_frame_qindex[INTER_FRAME] < active_worst_quality) {
+      q = cpi->rc.avg_frame_qindex[INTER_FRAME];
     } else {
       q = active_worst_quality;
     }
@@ -514,7 +510,7 @@
     if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) {
       if (q < cpi->cq_target_quality)
         q = cpi->cq_target_quality;
-      if (cpi->frames_since_key > 1) {
+      if (cpi->rc.frames_since_key > 1) {
         active_best_quality = get_active_quality(q, cpi->rc.gfu_boost,
                                                  gf_low, gf_high,
                                                  afq_low_motion_minq,
@@ -532,7 +528,7 @@
       if (!cpi->refresh_alt_ref_frame) {
         active_best_quality = cpi->cq_target_quality;
       } else {
-        if (cpi->frames_since_key > 1) {
+        if (cpi->rc.frames_since_key > 1) {
           active_best_quality = get_active_quality(
               q, cpi->rc.gfu_boost, gf_low, gf_high,
               afq_low_motion_minq, afq_high_motion_minq);
@@ -552,10 +548,10 @@
       active_best_quality = cpi->cq_target_quality;
     } else {
       if (cpi->pass == 0 &&
-          cpi->rc.avg_frame_qindex < active_worst_quality)
+          cpi->rc.avg_frame_qindex[INTER_FRAME] < active_worst_quality)
         // 1-pass: for now, use the average Q for the active_best, if it's lower
         // than active_worst.
-        active_best_quality = inter_minq[cpi->rc.avg_frame_qindex];
+        active_best_quality = inter_minq[cpi->rc.avg_frame_qindex[INTER_FRAME]];
       else
         active_best_quality = inter_minq[active_worst_quality];
 
@@ -587,19 +583,18 @@
   if (active_worst_quality < active_best_quality)
     active_worst_quality = active_best_quality;
 
-  *top_index_prop = active_worst_quality;
   *top_index = active_worst_quality;
   *bottom_index = active_best_quality;
 
 #if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
   // Limit Q range for the adaptive loop.
-  if (cm->frame_type == KEY_FRAME && !cpi->this_key_frame_forced) {
+  if (cm->frame_type == KEY_FRAME && !cpi->rc.this_key_frame_forced) {
     if (!(cpi->pass == 0 && cpi->common.current_video_frame == 0)) {
       *top_index = active_worst_quality;
       *top_index =
           (active_worst_quality + active_best_quality * 3) / 4;
     }
-  } else if (!cpi->is_src_frame_alt_ref &&
+  } else if (!cpi->rc.is_src_frame_alt_ref &&
              (cpi->oxcf.end_usage != USAGE_STREAM_FROM_SERVER) &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
     *top_index =
@@ -610,7 +605,7 @@
   if (cpi->oxcf.end_usage == USAGE_CONSTANT_QUALITY) {
     q = active_best_quality;
   // Special case code to try and match quality with forced key frames
-  } else if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
+  } else if ((cm->frame_type == KEY_FRAME) && cpi->rc.this_key_frame_forced) {
     q = cpi->rc.last_boosted_qindex;
   } else {
     // Determine initial Q to try.
@@ -647,61 +642,6 @@
   return q;
 }
 
-static int estimate_keyframe_frequency(VP9_COMP *cpi) {
-  int i;
-
-  // Average key frame frequency
-  int av_key_frame_frequency = 0;
-
-  /* First key frame at start of sequence is a special case. We have no
-   * frequency data.
-   */
-  if (cpi->rc.key_frame_count == 1) {
-    /* Assume a default of 1 kf every 2 seconds, or the max kf interval,
-     * whichever is smaller.
-     */
-    int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
-    av_key_frame_frequency = (int)cpi->output_framerate * 2;
-
-    if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
-      av_key_frame_frequency = cpi->oxcf.key_freq;
-
-    cpi->rc.prior_key_frame_distance[KEY_FRAME_CONTEXT - 1]
-      = av_key_frame_frequency;
-  } else {
-    unsigned int total_weight = 0;
-    int last_kf_interval =
-      (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1;
-
-    /* reset keyframe context and calculate weighted average of last
-     * KEY_FRAME_CONTEXT keyframes
-     */
-    for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
-      if (i < KEY_FRAME_CONTEXT - 1)
-        cpi->rc.prior_key_frame_distance[i]
-          = cpi->rc.prior_key_frame_distance[i + 1];
-      else
-        cpi->rc.prior_key_frame_distance[i] = last_kf_interval;
-
-      av_key_frame_frequency += prior_key_frame_weight[i]
-                                * cpi->rc.prior_key_frame_distance[i];
-      total_weight += prior_key_frame_weight[i];
-    }
-
-    av_key_frame_frequency /= total_weight;
-  }
-  return av_key_frame_frequency;
-}
-
-
-static void adjust_key_frame_context(VP9_COMP *cpi) {
-  // Clear down mmx registers to allow floating point in what follows
-  vp9_clear_system_state();
-
-  cpi->frames_since_key = 0;
-  cpi->rc.key_frame_count++;
-}
-
 void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi,
                                       int this_frame_target,
                                       int *frame_under_shoot_limit,
@@ -755,8 +695,7 @@
   return 1;
 }
 
-void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used,
-                              int worst_q) {
+void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) {
   VP9_COMMON *const cm = &cpi->common;
   // Update rate control heuristics
   cpi->rc.projected_frame_size = (bytes_used << 3);
@@ -766,8 +705,29 @@
       cpi, (cpi->sf.recode_loop ||
             cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) ? 2 : 0);
 
-  cpi->rc.last_q[cm->frame_type] = cm->base_qindex;
-  cpi->rc.active_worst_quality = worst_q;
+  // Keep a record of last Q and ambient average Q.
+  if (cm->frame_type == KEY_FRAME) {
+    cpi->rc.last_q[KEY_FRAME] = cm->base_qindex;
+    cpi->rc.avg_frame_qindex[KEY_FRAME] =
+        (2 + 3 * cpi->rc.avg_frame_qindex[KEY_FRAME] + cm->base_qindex) >> 2;
+  } else if (!cpi->rc.is_src_frame_alt_ref &&
+             (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
+    cpi->rc.last_q[2] = cm->base_qindex;
+    cpi->rc.avg_frame_qindex[2] =
+        (2 + 3 * cpi->rc.avg_frame_qindex[2] + cm->base_qindex) >> 2;
+  } else {
+    cpi->rc.last_q[INTER_FRAME] = cm->base_qindex;
+    cpi->rc.avg_frame_qindex[INTER_FRAME] =
+        (2 + 3 * cpi->rc.avg_frame_qindex[INTER_FRAME] +
+         cm->base_qindex) >> 2;
+    cpi->rc.ni_frames++;
+    cpi->rc.tot_q += vp9_convert_qindex_to_q(cm->base_qindex);
+    cpi->rc.avg_q = cpi->rc.tot_q / (double)cpi->rc.ni_frames;
+
+    // Calculate the average Q for normal inter frames (not key or GFU frames).
+    cpi->rc.ni_tot_qi += cm->base_qindex;
+    cpi->rc.ni_av_qi = cpi->rc.ni_tot_qi / cpi->rc.ni_frames;
+  }
 
   // Keep record of last boosted (KF/KF/ARF) Q value.
   // If the current frame is coded at a lower Q then we also update it.
@@ -777,32 +737,10 @@
   if ((cm->base_qindex < cpi->rc.last_boosted_qindex) ||
       ((cpi->static_mb_pct < 100) &&
        ((cm->frame_type == KEY_FRAME) || cpi->refresh_alt_ref_frame ||
-        (cpi->refresh_golden_frame && !cpi->is_src_frame_alt_ref)))) {
+        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)))) {
     cpi->rc.last_boosted_qindex = cm->base_qindex;
   }
 
-  if (cm->frame_type == KEY_FRAME) {
-    adjust_key_frame_context(cpi);
-  }
-
-  // Keep a record of ambient average Q.
-  if (cm->frame_type != KEY_FRAME)
-    cpi->rc.avg_frame_qindex = (2 + 3 * cpi->rc.avg_frame_qindex +
-                            cm->base_qindex) >> 2;
-
-  // Keep a record from which we can calculate the average Q excluding GF
-  // updates and key frames.
-  if (cm->frame_type != KEY_FRAME &&
-      !cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
-    cpi->rc.ni_frames++;
-    cpi->rc.tot_q += vp9_convert_qindex_to_q(cm->base_qindex);
-    cpi->rc.avg_q = cpi->rc.tot_q / (double)cpi->rc.ni_frames;
-
-    // Calculate the average Q for normal inter frames (not key or GFU frames).
-    cpi->rc.ni_tot_qi += cm->base_qindex;
-    cpi->rc.ni_av_qi = cpi->rc.ni_tot_qi / cpi->rc.ni_frames;
-  }
-
   // Update the buffer level variable.
   // Non-viewable frames are a special case and are treated as pure overhead.
   if (!cm->show_frame)
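
The ambient-Q bookkeeping folded into vp9_rc_postencode_update() is a rounded 3:1 exponential moving average kept per frame class: each update replaces avg with (2 + 3*avg + q) >> 2, roughly 0.75*avg + 0.25*q with rounding. A tiny standalone check of that recurrence; the helper name is invented.

#include <stdio.h>

/* avg <- (2 + 3*avg + q) >> 2, i.e. approximately round(0.75*avg + 0.25*q). */
static int update_avg_qindex(int avg, int q) {
  return (2 + 3 * avg + q) >> 2;
}

int main(void) {
  int avg = 100;
  avg = update_avg_qindex(avg, 120);   /* (2 + 300 + 120) >> 2 = 105 */
  avg = update_avg_qindex(avg, 120);   /* (2 + 315 + 120) >> 2 = 109 */
  printf("ambient avg qindex drifts toward the new q: %d\n", avg);
  return 0;
}
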
diff --git a/vp9/encoder/vp9_ratectrl.h b/vp9/encoder/vp9_ratectrl.h
index 063ac8f..8113a05 100644
--- a/vp9/encoder/vp9_ratectrl.h
+++ b/vp9/encoder/vp9_ratectrl.h
@@ -42,18 +42,16 @@
 // Picks q and q bounds given the target for bits
 int vp9_rc_pick_q_and_adjust_q_bounds(const VP9_COMP *cpi,
                                       int *bottom_index,
-                                      int *top_index,
-                                      int *top_index_prop);
+                                      int *top_index);
 
 // Estimates q to achieve a target bits per frame
 int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame,
                       int active_best_quality, int active_worst_quality);
 
 // Post encode update of the rate control parameters based
-// on bytes used and q used for the frame
+// on bytes used
 void vp9_rc_postencode_update(VP9_COMP *cpi,
-                              uint64_t bytes_used,
-                              int worst_q);
+                              uint64_t bytes_used);
 
 // estimates bits per mb for a given qindex and correction factor
 int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 5702e5a..d464d15 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1042,7 +1042,7 @@
                                 TX_4X4, mode,
                                 x->skip_encode ? src : dst,
                                 x->skip_encode ? src_stride : dst_stride,
-                                dst, dst_stride);
+                                dst, dst_stride, idx, idy, 0);
         vp9_subtract_block(4, 4, src_diff, 8,
                            src, src_stride,
                            dst, dst_stride);
@@ -2532,11 +2532,11 @@
     tmp_mv.as_mv.row >>= 3;
 
     // Small-range full-pixel motion search
-    bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb,
+    bestsme = vp9_refining_search_8p_c(x, &tmp_mv.as_mv, sadpb,
                                        search_range,
                                        &cpi->fn_ptr[bsize],
                                        x->nmvjointcost, x->mvcost,
-                                       &ref_mv[id], second_pred,
+                                       &ref_mv[id].as_mv, second_pred,
                                        pw, ph);
 
     x->mv_col_min = tmp_col_min;
@@ -2748,6 +2748,7 @@
     intpel_mv &= (mbmi->mv[1].as_mv.row & 15) == 0 &&
         (mbmi->mv[1].as_mv.col & 15) == 0;
 
+
   // Search for best switchable filter by checking the variance of
   // pred error irrespective of whether the filter will be used
   if (cm->mcomp_filter_type != BILINEAR) {
@@ -2757,7 +2758,7 @@
       *best_filter = EIGHTTAP;
       vp9_zero(cpi->rd_filter_cache);
     } else {
-      int i, newbest;
+      int newbest;
       int tmp_rate_sum = 0;
       int64_t tmp_dist_sum = 0;
 
@@ -3351,7 +3352,7 @@
       // unless ARNR filtering is enabled in which case we want
       // an unfiltered alternative. We allow near/nearest as well
       // because they may result in zero-zero MVs but be cheaper.
-      if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
+      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
         if ((this_mode != ZEROMV &&
              !(this_mode == NEARMV &&
                frame_mv[NEARMV][ALTREF_FRAME].as_int == 0) &&
@@ -3864,6 +3865,7 @@
     int this_skip2 = 0;
     int64_t total_sse = INT_MAX;
     int early_term = 0;
+    int64_t mask_rd = 0;
 
     for (i = 0; i < TX_MODES; ++i)
       tx_cache[i] = INT64_MAX;
@@ -3998,7 +4000,7 @@
       // unless ARNR filtering is enabled in which case we want
       // an unfiltered alternative. We allow near/nearest as well
       // because they may result in zero-zero MVs but be cheaper.
-      if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
+      if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
         continue;
     }
 
@@ -4057,21 +4059,20 @@
           cpi->rd_thresh_sub8x8[segment_id][bsize][THR_GOLD] : this_rd_thresh;
       xd->mi_8x8[0]->mbmi.tx_size = TX_4X4;
 
-      cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
+      for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+        cpi->rd_filter_cache[i] = INT64_MAX;
+
       if (cm->mcomp_filter_type != BILINEAR) {
         tmp_best_filter = EIGHTTAP;
         if (x->source_variance <
             cpi->sf.disable_filter_search_var_thresh) {
           tmp_best_filter = EIGHTTAP;
-          vp9_zero(cpi->rd_filter_cache);
         } else if (cpi->sf.adaptive_pred_filter_type == 1 &&
                    ctx->pred_filter_type < SWITCHABLE) {
           tmp_best_filter = ctx->pred_filter_type;
-          vp9_zero(cpi->rd_filter_cache);
         } else if (cpi->sf.adaptive_pred_filter_type == 2) {
           tmp_best_filter = ctx->pred_filter_type < SWITCHABLE ?
                               ctx->pred_filter_type : 0;
-          vp9_zero(cpi->rd_filter_cache);
         } else {
           for (switchable_filter_index = 0;
                switchable_filter_index < SWITCHABLE_FILTERS;
@@ -4093,7 +4094,6 @@
 
             if (tmp_rd == INT64_MAX)
               continue;
-            cpi->rd_filter_cache[switchable_filter_index] = tmp_rd;
             rs = get_switchable_rate(x);
             rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
             cpi->rd_filter_cache[SWITCHABLE_FILTERS] =
@@ -4102,6 +4102,9 @@
             if (cm->mcomp_filter_type == SWITCHABLE)
               tmp_rd += rs_rd;
 
+            cpi->rd_filter_cache[switchable_filter_index] = tmp_rd;
+            mask_rd = MAX(tmp_rd, mask_rd);
+
             newbest = (tmp_rd < tmp_best_rd);
             if (newbest) {
               tmp_best_filter = mbmi->interp_filter;
@@ -4349,16 +4352,17 @@
         cm->mcomp_filter_type != BILINEAR) {
       int64_t ref = cpi->rd_filter_cache[cm->mcomp_filter_type == SWITCHABLE ?
                               SWITCHABLE_FILTERS : cm->mcomp_filter_type];
+      int64_t adj_rd;
+
       for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
-        int64_t adj_rd;
-        // In cases of poor prediction, filter_cache[] can contain really big
-        // values, which actually are bigger than this_rd itself. This can
-        // cause negative best_filter_rd[] values, which is obviously silly.
-        // Therefore, if filter_cache < ref, we do an adjusted calculation.
-        if (cpi->rd_filter_cache[i] >= ref)
-          adj_rd = this_rd + cpi->rd_filter_cache[i] - ref;
-        else  // FIXME(rbultje) do this for comppred also
-          adj_rd = this_rd - (ref - cpi->rd_filter_cache[i]) * this_rd / ref;
+        if (ref == INT64_MAX)
+          adj_rd = 0;
+        else if (cpi->rd_filter_cache[i] == INT64_MAX)
+          adj_rd = mask_rd - ref + 10;
+        else
+          adj_rd = cpi->rd_filter_cache[i] - ref;
+
+        adj_rd += this_rd;
         best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd);
       }
     }
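
The last vp9_rdopt.c hunk replaces the old "adjusted calculation" for per-filter RD costs with a plain offset against the cost of the chosen filter, using the new mask_rd running maximum to handle filters that were never evaluated. Pulled out as a standalone helper it reads as below; the function name is invented, the logic is the one added above.

#include <stdint.h>

/* cache_rd: cached RD cost for one switchable filter (INT64_MAX if that
 * filter was never evaluated); ref: cached cost of the filter actually used;
 * mask_rd: largest cached cost seen so far; this_rd: RD cost of the mode. */
static int64_t adjusted_filter_rd(int64_t cache_rd, int64_t ref,
                                  int64_t mask_rd, int64_t this_rd) {
  int64_t adj_rd;
  if (ref == INT64_MAX)
    adj_rd = 0;                     /* no reference cost to offset against     */
  else if (cache_rd == INT64_MAX)
    adj_rd = mask_rd - ref + 10;    /* unevaluated filter: push past the worst */
  else
    adj_rd = cache_rd - ref;        /* normal case: offset from chosen filter  */
  return this_rd + adj_rd;
}
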
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index a9cdc9a..4568e7a 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -58,6 +58,15 @@
   // vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
   //            sizeof(cpi->mb.e_mbd.segment_feature_mask));
 }
+void vp9_disable_segfeature(struct segmentation *seg, int segment_id,
+                            SEG_LVL_FEATURES feature_id) {
+  seg->feature_mask[segment_id] &= ~(1 << feature_id);
+}
+
+void vp9_clear_segdata(struct segmentation *seg, int segment_id,
+                       SEG_LVL_FEATURES feature_id) {
+  seg->feature_data[segment_id][feature_id] = 0;
+}
 
 // Based on set of segment counts calculate a probability tree
 static void calc_segtree_probs(int *segcounts, vp9_prob *segment_tree_probs) {
diff --git a/vp9/encoder/vp9_segmentation.h b/vp9/encoder/vp9_segmentation.h
index 2183771..03f14ea 100644
--- a/vp9/encoder/vp9_segmentation.h
+++ b/vp9/encoder/vp9_segmentation.h
@@ -18,6 +18,12 @@
 void vp9_enable_segmentation(VP9_PTR ptr);
 void vp9_disable_segmentation(VP9_PTR ptr);
 
+void vp9_disable_segfeature(struct segmentation *seg,
+                            int segment_id,
+                            SEG_LVL_FEATURES feature_id);
+void vp9_clear_segdata(struct segmentation *seg,
+                       int segment_id,
+                       SEG_LVL_FEATURES feature_id);
 // Valid values for a segment are 0 to 3
 // Segmentation map is arranged as [Rows][Columns]
 void vp9_set_segmentation_map(VP9_PTR ptr, unsigned char *segmentation_map);
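
The two new helpers complete the segmentation setters: vp9_disable_segfeature() clears the feature's bit in feature_mask[] and vp9_clear_segdata() zeroes the stored value, the inverse of enabling a feature and setting its data. A self-contained sketch of those bit and data operations; the enum values and array sizes are stand-ins, with the real definitions living in vp9/common/vp9_seg_common.h.

#include <stdio.h>

/* Stand-in constants for this illustration only. */
enum { SEG_LVL_ALT_Q = 0, SEG_LVL_MAX = 4, MAX_SEGMENTS = 8 };

int main(void) {
  unsigned feature_mask[MAX_SEGMENTS] = { 0 };
  int feature_data[MAX_SEGMENTS][SEG_LVL_MAX] = { { 0 } };

  /* Enable an alternate-Q delta on segment 1... */
  feature_mask[1] |= (1u << SEG_LVL_ALT_Q);
  feature_data[1][SEG_LVL_ALT_Q] = -16;

  /* ...then undo it, exactly as the new helpers do. */
  feature_mask[1] &= ~(1u << SEG_LVL_ALT_Q);   /* vp9_disable_segfeature() */
  feature_data[1][SEG_LVL_ALT_Q] = 0;          /* vp9_clear_segdata()      */

  printf("mask=%u data=%d\n", feature_mask[1], feature_data[1][SEG_LVL_ALT_Q]);
  return 0;
}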