Refactor inter recon functions to support scaling

Ensure that all inter prediction goes through a common code path
that takes scaling into account. Removes a bunch of duplicate
1st/2nd predictor code. Also introduces a 16x8 prediction mode for
pairs of 8x8 blocks that share a motion vector, similar to the 8x4
trick we were doing before. Enabling this path with EIGHTTAP_SMOOTH
changes the output slightly for reasons that are not yet clear, so
it is disabled for that filter for now.
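
For reference, the per-component scaling introduced in
vp9_reconinter.c boils down to the arithmetic below (a sketch
restating the new helper, with the unscaled case worked through as
an illustrative example):

    /* mv is a q3 or q4 MV component; num/den is the scaling ratio to
     * apply (this change always configures 1:1 -- real ratios arrive
     * with later scaling work); offset_q4 is a q4 fractional offset. */
    static int32_t scale_motion_vector_component(int mv, int num,
                                                 int den, int offset_q4) {
      const int32_t mv_q4 = mv * 16;
      return (mv_q4 * num / den + offset_q4 + 8) >> 4;
    }

    /* With an unscaled reference (num == den == 1, offset_q4 == 0)
     * this reduces to ((mv * 16) + 8) >> 4 == mv, so the motion
     * vectors used for prediction are unchanged. */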

Change-Id: Ia053e823a8bc616a988a0af30452e1e75a739cba
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 56b1bec..efb87ac 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -506,10 +506,12 @@
     make_tuple(4, 4, &convolve8_2d_only_c),
     make_tuple(8, 4, &convolve8_2d_only_c),
     make_tuple(8, 8, &convolve8_2d_only_c),
+    make_tuple(16, 8, &convolve8_2d_only_c),
     make_tuple(16, 16, &convolve8_2d_only_c),
     make_tuple(4, 4, &convolve8_c),
     make_tuple(8, 4, &convolve8_c),
     make_tuple(8, 8, &convolve8_c),
+    make_tuple(16, 8, &convolve8_c),
     make_tuple(16, 16, &convolve8_c)));
 }
 
@@ -523,5 +525,6 @@
     make_tuple(4, 4, &convolve8_ssse3),
     make_tuple(8, 4, &convolve8_ssse3),
     make_tuple(8, 8, &convolve8_ssse3),
+    make_tuple(16, 8, &convolve8_ssse3),
     make_tuple(16, 16, &convolve8_ssse3)));
 #endif
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index d1da5fe..5d876c1 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -288,6 +288,15 @@
   DECLARE_ALIGNED(16, int16_t, dqcoeff[32*32+16*16*2]);
 } SUPERBLOCKD;
 
+struct scale_factors {
+  int x_num;
+  int x_den;
+  int x_offset_q4;
+  int y_num;
+  int y_den;
+  int y_offset_q4;
+};
+
 typedef struct macroblockd {
   DECLARE_ALIGNED(16, int16_t,  diff[384]);      /* from idct diff */
   DECLARE_ALIGNED(16, uint8_t,  predictor[384]);
@@ -303,6 +312,8 @@
   YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
   YV12_BUFFER_CONFIG second_pre;
   YV12_BUFFER_CONFIG dst;
+  struct scale_factors scale_factor[2];
+  struct scale_factors scale_factor_uv[2];
 
   MODE_INFO *prev_mode_info_context;
   MODE_INFO *mode_info_context;
diff --git a/vp9/common/vp9_convolve.c b/vp9/common/vp9_convolve.c
index b87c410..ac5d5cb 100644
--- a/vp9/common/vp9_convolve.c
+++ b/vp9/common/vp9_convolve.c
@@ -318,25 +318,17 @@
                        const int16_t *filter_x, int filter_x_stride,
                        const int16_t *filter_y, int filter_y_stride,
                        int w, int h) {
-  if (h == 16) {
+  if (w == 16 && h == 16) {
     vp9_copy_mem16x16(src, src_stride, dst, dst_stride);
-  } else if (h == 8) {
+  } else if (w == 8 && h == 8) {
     vp9_copy_mem8x8(src, src_stride, dst, dst_stride);
-  } else if (w == 8) {
+  } else if (w == 8 && h == 4) {
     vp9_copy_mem8x4(src, src_stride, dst, dst_stride);
   } else {
-    // 4x4
     int r;
 
-    for (r = 0; r < 4; ++r) {
-#if !(CONFIG_FAST_UNALIGNED)
-      dst[0]  = src[0];
-      dst[1]  = src[1];
-      dst[2]  = src[2];
-      dst[3]  = src[3];
-#else
-      *(uint32_t *)dst = *(const uint32_t *)src;
-#endif
+    for (r = h; r > 0; --r) {
+      memcpy(dst, src, w);
       src += src_stride;
       dst += dst_stride;
     }
diff --git a/vp9/common/vp9_mbpitch.c b/vp9/common/vp9_mbpitch.c
index ed96292..b3303eb 100644
--- a/vp9/common/vp9_mbpitch.c
+++ b/vp9/common/vp9_mbpitch.c
@@ -71,6 +71,17 @@
     setup_block(&blockd[block + 4], stride, v, v2, stride,
       ((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs);
   }
+
+  // TODO(jkoleszar): this will move once we're actually scaling.
+  xd->scale_factor[0].x_num = 1;
+  xd->scale_factor[0].x_den = 1;
+  xd->scale_factor[0].y_num = 1;
+  xd->scale_factor[0].y_den = 1;
+  xd->scale_factor[0].x_offset_q4 = 0;
+  xd->scale_factor[0].y_offset_q4 = 0;
+  xd->scale_factor[1] = xd->scale_factor[0];
+  xd->scale_factor_uv[0] = xd->scale_factor[0];
+  xd->scale_factor_uv[1] = xd->scale_factor[1];
 }
 
 void vp9_setup_block_dptrs(MACROBLOCKD *xd) {
diff --git a/vp9/common/vp9_mv.h b/vp9/common/vp9_mv.h
index 8acd404..a1eef46 100644
--- a/vp9/common/vp9_mv.h
+++ b/vp9/common/vp9_mv.h
@@ -23,4 +23,14 @@
   MV as_mv;
 } int_mv; /* facilitates faster equality tests and copies */
 
+struct mv32 {
+  int32_t row;
+  int32_t col;
+};
+
+typedef union int_mv32 {
+  uint64_t    as_int;
+  struct mv32 as_mv;
+} int_mv32; /* facilitates faster equality tests and copies */
+
 #endif  // VP9_COMMON_VP9_MV_H_
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index b75525e..3b4b342 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -146,109 +146,118 @@
   }
 }
 
-void vp9_build_inter_predictors_b(BLOCKD *d, int pitch,
-                                  struct subpix_fn_table *subpix) {
-  uint8_t *ptr_base;
-  uint8_t *ptr;
-  uint8_t *pred_ptr = d->predictor;
-  int_mv mv;
+static int32_t scale_motion_vector_component(int mv,
+                                             int num,
+                                             int den,
+                                             int offset_q4) {
+  // returns the scaled and offset value of the mv component.
+  // input and output mv have the same units -- this would work with either q3
+  // or q4 motion vectors. Offset is given as a q4 fractional number.
+  const int32_t mv_q4 = mv * 16;
 
-  ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv[0].as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
-        (mv.as_mv.col >> 3);
+  /* TODO(jkoleszar): make fixed point, or as a second multiply? */
+  return (mv_q4 * num / den + offset_q4 + 8) >> 4;
+}
 
-  subpix->predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][0](
-      ptr, d->pre_stride, pred_ptr, pitch,
+static int_mv32 scale_motion_vector(const int_mv *src_mv,
+                                    const struct scale_factors *scale) {
+  // returns mv * scale + offset
+  int_mv32 result;
+
+  result.as_mv.row = scale_motion_vector_component(src_mv->as_mv.row,
+                                                   scale->y_num, scale->y_den,
+                                                   scale->y_offset_q4);
+  result.as_mv.col = scale_motion_vector_component(src_mv->as_mv.col,
+                                                   scale->x_num, scale->x_den,
+                                                   scale->x_offset_q4);
+  return result;
+}
+
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+                               uint8_t *dst, int dst_stride,
+                               const int_mv *mv_q3,
+                               const struct scale_factors *scale,
+                               int w, int h, int do_avg,
+                               const struct subpix_fn_table *subpix) {
+  int_mv32 mv;
+
+  mv = scale_motion_vector(mv_q3, scale);
+  src = src + (mv.as_mv.row >> 3) * src_stride + (mv.as_mv.col >> 3);
+  subpix->predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][do_avg](
+      src, src_stride, dst, dst_stride,
       subpix->filter_x[(mv.as_mv.col & 7) << 1], subpix->x_step_q4,
       subpix->filter_y[(mv.as_mv.row & 7) << 1], subpix->y_step_q4,
-      4, 4);
+      w, h);
 }
 
-/*
- * Similar to vp9_build_inter_predictors_b(), but instead of storing the
- * results in d->predictor, we average the contents of d->predictor (which
- * come from an earlier call to vp9_build_inter_predictors_b()) with the
- * predictor of the second reference frame / motion vector.
+/* Like vp9_build_inter_predictor, but takes the full-pel part of the
+ * mv separately, and the fractional part as a q4.
  */
-void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
-                                      struct subpix_fn_table *subpix) {
-  uint8_t *ptr_base;
-  uint8_t *ptr;
-  uint8_t *pred_ptr = d->predictor;
-  int_mv mv;
+void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
+                                  uint8_t *dst, int dst_stride,
+                                  const int_mv *fullpel_mv_q3,
+                                  const int_mv *frac_mv_q4,
+                                  const struct scale_factors *scale,
+                                  int w, int h, int do_avg,
+                                  const struct subpix_fn_table *subpix) {
+  const int mv_row_q4 = ((fullpel_mv_q3->as_mv.row >> 3) << 4)
+                        + (frac_mv_q4->as_mv.row & 0xf);
+  const int mv_col_q4 = ((fullpel_mv_q3->as_mv.col >> 3) << 4)
+                        + (frac_mv_q4->as_mv.col & 0xf);
+  const int scaled_mv_row_q4 =
+      scale_motion_vector_component(mv_row_q4, scale->y_num, scale->y_den,
+                                    scale->y_offset_q4);
+  const int scaled_mv_col_q4 =
+      scale_motion_vector_component(mv_col_q4, scale->x_num, scale->x_den,
+                                    scale->x_offset_q4);
+  const int subpel_x = scaled_mv_col_q4 & 15;
+  const int subpel_y = scaled_mv_row_q4 & 15;
 
-  ptr_base = *(d->base_second_pre);
-  mv.as_int = d->bmi.as_mv[1].as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
-        (mv.as_mv.col >> 3);
-
-  subpix->predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][1](
-      ptr, d->pre_stride, pred_ptr, pitch,
-      subpix->filter_x[(mv.as_mv.col & 7) << 1], subpix->x_step_q4,
-      subpix->filter_y[(mv.as_mv.row & 7) << 1], subpix->y_step_q4,
-      4, 4);
+  src = src + (scaled_mv_row_q4 >> 4) * src_stride + (scaled_mv_col_q4 >> 4);
+  subpix->predict[!!subpel_x][!!subpel_y][do_avg](
+      src, src_stride, dst, dst_stride,
+      subpix->filter_x[subpel_x], subpix->x_step_q4,
+      subpix->filter_y[subpel_y], subpix->y_step_q4,
+      w, h);
 }
 
-void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
-  uint8_t *ptr_base;
-  uint8_t *ptr;
-  uint8_t *pred_ptr = d->predictor;
-  int_mv mv;
+static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
+                                      const struct scale_factors *scale,
+                                      int block_size, int stride, int which_mv,
+                                      const struct subpix_fn_table *subpix) {
+  assert(d1->predictor - d0->predictor == block_size);
+  assert(d1->pre == d0->pre + block_size);
 
-  ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv[0].as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
-        (mv.as_mv.col >> 3);
+  if (d0->bmi.as_mv[which_mv].as_int == d1->bmi.as_mv[which_mv].as_int) {
+    uint8_t **base_pre = which_mv ? d0->base_second_pre : d0->base_pre;
 
-  xd->subpix.predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][0](
-      ptr, d->pre_stride, pred_ptr, pitch,
-      xd->subpix.filter_x[(mv.as_mv.col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(mv.as_mv.row & 7) << 1], xd->subpix.y_step_q4,
-      8, 8);
-}
+    vp9_build_inter_predictor(*base_pre + d0->pre,
+                              d0->pre_stride,
+                              d0->predictor, stride,
+                              &d0->bmi.as_mv[which_mv],
+                              &scale[which_mv],
+                              2 * block_size, block_size, which_mv,
+                              subpix);
 
-/*
- * Similar to build_inter_predictors_4b(), but instead of storing the
- * results in d->predictor, we average the contents of d->predictor (which
- * come from an earlier call to build_inter_predictors_4b()) with the
- * predictor of the second reference frame / motion vector.
- */
-void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
-                                      BLOCKD *d, int pitch) {
-  uint8_t *ptr_base;
-  uint8_t *ptr;
-  uint8_t *pred_ptr = d->predictor;
-  int_mv mv;
+  } else {
+    uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
+    uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
 
-  ptr_base = *(d->base_second_pre);
-  mv.as_int = d->bmi.as_mv[1].as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
-        (mv.as_mv.col >> 3);
-
-  xd->subpix.predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][1](
-      ptr, d->pre_stride, pred_ptr, pitch,
-      xd->subpix.filter_x[(mv.as_mv.col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(mv.as_mv.row & 7) << 1], xd->subpix.y_step_q4,
-      8, 8);
-}
-
-static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
-  uint8_t *ptr_base;
-  uint8_t *ptr;
-  uint8_t *pred_ptr = d->predictor;
-  int_mv mv;
-
-  ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv[0].as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
-        (mv.as_mv.col >> 3);
-
-  xd->subpix.predict[!!(mv.as_mv.col & 7)][!!(mv.as_mv.row & 7)][0](
-      ptr, d->pre_stride, pred_ptr, pitch,
-      xd->subpix.filter_x[(mv.as_mv.col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(mv.as_mv.row & 7) << 1], xd->subpix.y_step_q4,
-      8, 4);
+    vp9_build_inter_predictor(*base_pre0 + d0->pre,
+                              d0->pre_stride,
+                              d0->predictor, stride,
+                              &d0->bmi.as_mv[which_mv],
+                              &scale[which_mv],
+                              block_size, block_size, which_mv,
+                              subpix);
+    vp9_build_inter_predictor(*base_pre1 + d1->pre,
+                              d1->pre_stride,
+                              d1->predictor, stride,
+                              &d1->bmi.as_mv[which_mv],
+                              &scale[which_mv],
+                              block_size, block_size, which_mv,
+                              subpix);
+  }
 }
 
 /*encoder only*/
@@ -329,19 +338,14 @@
   }
 
   for (i = 16; i < 24; i += 2) {
+    const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
+    int which_mv;
     BLOCKD *d0 = &blockd[i];
     BLOCKD *d1 = &blockd[i + 1];
 
-    if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
-      build_inter_predictors2b(xd, d0, 8);
-    else {
-      vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
-      vp9_build_inter_predictors_b(d1, 8, &xd->subpix);
-    }
-
-    if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-      vp9_build_2nd_inter_predictors_b(d0, 8, &xd->subpix);
-      vp9_build_2nd_inter_predictors_b(d1, 8, &xd->subpix);
+    for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+      build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
+                                &xd->subpix);
     }
   }
 }
@@ -383,91 +387,87 @@
 }
 
 /*encoder only*/
-void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
-                                             uint8_t *dst_y,
-                                             int dst_ystride,
-                                             int clamp_mvs) {
-  uint8_t *ptr_base = xd->pre.y_buffer;
-  uint8_t *ptr;
-  int pre_stride = xd->block[0].pre_stride;
-  int_mv ymv;
+void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
+                                         uint8_t *dst_y,
+                                         int dst_ystride) {
+  const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
+  int which_mv;
 
-  ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
+  for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+    const int clamp_mvs =
+        which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
+                 : xd->mode_info_context->mbmi.need_to_clamp_mvs;
+    uint8_t *base_pre;
+    int_mv ymv;
 
-  if (clamp_mvs)
-    clamp_mv_to_umv_border(&ymv.as_mv, xd);
+    ymv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
+    base_pre = which_mv ? xd->second_pre.y_buffer
+                        : xd->pre.y_buffer;
+    if (clamp_mvs)
+      clamp_mv_to_umv_border(&ymv.as_mv, xd);
 
-  ptr = ptr_base + (ymv.as_mv.row >> 3) * pre_stride + (ymv.as_mv.col >> 3);
-
-  xd->subpix.predict[!!(ymv.as_mv.col & 7)][!!(ymv.as_mv.row & 7)][0](
-      ptr, pre_stride, dst_y, dst_ystride,
-      xd->subpix.filter_x[(ymv.as_mv.col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(ymv.as_mv.row & 7) << 1], xd->subpix.y_step_q4,
-      16, 16);
+    vp9_build_inter_predictor(base_pre, xd->block[0].pre_stride,
+                              dst_y, dst_ystride,
+                              &ymv, &xd->scale_factor[which_mv],
+                              16, 16, which_mv, &xd->subpix);
+  }
 }
 
-void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
-                                              uint8_t *dst_u,
-                                              uint8_t *dst_v,
-                                              int dst_uvstride) {
-  int offset;
-  uint8_t *uptr, *vptr;
-  int pre_stride = xd->block[0].pre_stride;
-  int_mv _o16x16mv;
-  int_mv _16x16mv;
+void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
+                                          uint8_t *dst_u,
+                                          uint8_t *dst_v,
+                                          int dst_uvstride) {
+  const int use_second_ref = xd->mode_info_context->mbmi.second_ref_frame > 0;
+  int which_mv;
 
-  _16x16mv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
+  for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+    const int clamp_mvs =
+        which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
+                 : xd->mode_info_context->mbmi.need_to_clamp_mvs;
+    uint8_t *uptr, *vptr;
+    int pre_stride = xd->block[0].pre_stride;
+    int_mv _o16x16mv;
+    int_mv _16x16mv;
 
-  if (xd->mode_info_context->mbmi.need_to_clamp_mvs)
-    clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
+    _16x16mv.as_int = xd->mode_info_context->mbmi.mv[which_mv].as_int;
 
-  _o16x16mv = _16x16mv;
-  /* calc uv motion vectors */
-  if (_16x16mv.as_mv.row < 0)
-    _16x16mv.as_mv.row -= 1;
-  else
-    _16x16mv.as_mv.row += 1;
+    if (clamp_mvs)
+      clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
 
-  if (_16x16mv.as_mv.col < 0)
-    _16x16mv.as_mv.col -= 1;
-  else
-    _16x16mv.as_mv.col += 1;
+    _o16x16mv = _16x16mv;
+    /* calc uv motion vectors */
+    if (_16x16mv.as_mv.row < 0)
+      _16x16mv.as_mv.row -= 1;
+    else
+      _16x16mv.as_mv.row += 1;
 
-  _16x16mv.as_mv.row /= 2;
-  _16x16mv.as_mv.col /= 2;
+    if (_16x16mv.as_mv.col < 0)
+      _16x16mv.as_mv.col -= 1;
+    else
+      _16x16mv.as_mv.col += 1;
 
-  _16x16mv.as_mv.row &= xd->fullpixel_mask;
-  _16x16mv.as_mv.col &= xd->fullpixel_mask;
+    _16x16mv.as_mv.row /= 2;
+    _16x16mv.as_mv.col /= 2;
 
-  pre_stride >>= 1;
-  offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
-  uptr = xd->pre.u_buffer + offset;
-  vptr = xd->pre.v_buffer + offset;
+    _16x16mv.as_mv.row &= xd->fullpixel_mask;
+    _16x16mv.as_mv.col &= xd->fullpixel_mask;
 
-  xd->subpix.predict[!!(_o16x16mv.as_mv.col & 15)]
-                    [!!(_o16x16mv.as_mv.row & 15)][0](
-      uptr, pre_stride, dst_u, dst_uvstride,
-      xd->subpix.filter_x[_o16x16mv.as_mv.col & 15], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[_o16x16mv.as_mv.row & 15], xd->subpix.y_step_q4,
-      8, 8);
+    pre_stride >>= 1;
+    uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
+    vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
 
-  xd->subpix.predict[!!(_o16x16mv.as_mv.col & 15)]
-                    [!!(_o16x16mv.as_mv.row & 15)][0](
-      vptr, pre_stride, dst_v, dst_uvstride,
-      xd->subpix.filter_x[_o16x16mv.as_mv.col & 15], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[_o16x16mv.as_mv.row & 15], xd->subpix.y_step_q4,
-      8, 8);
-}
+    vp9_build_inter_predictor_q4(uptr, pre_stride,
+                                 dst_u, dst_uvstride,
+                                 &_16x16mv, &_o16x16mv,
+                                 &xd->scale_factor_uv[which_mv],
+                                 8, 8, which_mv, &xd->subpix);
 
-
-void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
-                                            uint8_t *dst_y,
-                                            uint8_t *dst_u,
-                                            uint8_t *dst_v,
-                                            int dst_ystride, int dst_uvstride) {
-  vp9_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride,
-      xd->mode_info_context->mbmi.need_to_clamp_mvs);
-  vp9_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
+    vp9_build_inter_predictor_q4(vptr, pre_stride,
+                                 dst_v, dst_uvstride,
+                                 &_16x16mv, &_o16x16mv,
+                                 &xd->scale_factor_uv[which_mv],
+                                 8, 8, which_mv, &xd->subpix);
+  }
 }
 
 void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
@@ -498,22 +498,17 @@
     x->pre.u_buffer = u1 + y_idx *  8 * x->pre.uv_stride + x_idx *  8;
     x->pre.v_buffer = v1 + y_idx *  8 * x->pre.uv_stride + x_idx *  8;
 
-    vp9_build_1st_inter16x16_predictors_mb(x,
-      dst_y + y_idx * 16 * dst_ystride  + x_idx * 16,
-      dst_u + y_idx *  8 * dst_uvstride + x_idx *  8,
-      dst_v + y_idx *  8 * dst_uvstride + x_idx *  8,
-      dst_ystride, dst_uvstride);
     if (x->mode_info_context->mbmi.second_ref_frame > 0) {
       x->second_pre.y_buffer = y2 + y_idx * 16 * x->pre.y_stride  + x_idx * 16;
       x->second_pre.u_buffer = u2 + y_idx *  8 * x->pre.uv_stride + x_idx *  8;
       x->second_pre.v_buffer = v2 + y_idx *  8 * x->pre.uv_stride + x_idx *  8;
+    }
 
-      vp9_build_2nd_inter16x16_predictors_mb(x,
+    vp9_build_inter16x16_predictors_mb(x,
         dst_y + y_idx * 16 * dst_ystride  + x_idx * 16,
         dst_u + y_idx *  8 * dst_uvstride + x_idx *  8,
         dst_v + y_idx *  8 * dst_uvstride + x_idx *  8,
         dst_ystride, dst_uvstride);
-    }
   }
 
   x->mb_to_top_edge    = edge[0];
@@ -603,144 +598,54 @@
 #endif
 }
 
-/*
- * The following functions should be called after an initial
- * call to vp9_build_1st_inter16x16_predictors_mb() or _mby()/_mbuv().
- * It will run a second filter on a (different) ref
- * frame and average the result with the output of the
- * first filter. The second reference frame is stored
- * in x->second_pre (the reference frame index is in
- * x->mode_info_context->mbmi.second_ref_frame). The second
- * motion vector is x->mode_info_context->mbmi.second_mv.
- *
- * This allows blending prediction from two reference frames
- * which sometimes leads to better prediction than from a
- * single reference framer.
- */
-void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
-                                             uint8_t *dst_y,
-                                             int dst_ystride) {
-  uint8_t *ptr;
-
-  int_mv _16x16mv;
-  int mv_row;
-  int mv_col;
-
-  uint8_t *ptr_base = xd->second_pre.y_buffer;
-  int pre_stride = xd->block[0].pre_stride;
-
-  _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
-
-  if (xd->mode_info_context->mbmi.need_to_clamp_secondmv)
-    clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
-
-  mv_row = _16x16mv.as_mv.row;
-  mv_col = _16x16mv.as_mv.col;
-
-  ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
-
-  xd->subpix.predict[!!(mv_col & 7)][!!(mv_row & 7)][1](
-      ptr, pre_stride, dst_y, dst_ystride,
-      xd->subpix.filter_x[(mv_col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(mv_row & 7) << 1], xd->subpix.y_step_q4,
-      16, 16);
-}
-
-void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
-                                              uint8_t *dst_u,
-                                              uint8_t *dst_v,
-                                              int dst_uvstride) {
-  int offset;
-  uint8_t *uptr, *vptr;
-
-  int_mv _16x16mv;
-  int mv_row;
-  int mv_col;
-  int omv_row, omv_col;
-
-  int pre_stride = xd->block[0].pre_stride;
-
-  _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
-
-  if (xd->mode_info_context->mbmi.need_to_clamp_secondmv)
-    clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
-
-  mv_row = _16x16mv.as_mv.row;
-  mv_col = _16x16mv.as_mv.col;
-
-  /* calc uv motion vectors */
-  omv_row = mv_row;
-  omv_col = mv_col;
-  mv_row = (mv_row + (mv_row > 0)) >> 1;
-  mv_col = (mv_col + (mv_col > 0)) >> 1;
-
-  mv_row &= xd->fullpixel_mask;
-  mv_col &= xd->fullpixel_mask;
-
-  pre_stride >>= 1;
-  offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
-  uptr = xd->second_pre.u_buffer + offset;
-  vptr = xd->second_pre.v_buffer + offset;
-
-  xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][1](
-      uptr, pre_stride, dst_u, dst_uvstride,
-      xd->subpix.filter_x[omv_col & 15], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[omv_row & 15], xd->subpix.y_step_q4,
-      8, 8);
-
-  xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][1](
-      vptr, pre_stride, dst_v, dst_uvstride,
-      xd->subpix.filter_x[omv_col & 15], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[omv_row & 15], xd->subpix.y_step_q4,
-      8, 8);
-}
-
-void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
-                                            uint8_t *dst_y,
-                                            uint8_t *dst_u,
-                                            uint8_t *dst_v,
-                                            int dst_ystride,
-                                            int dst_uvstride) {
-  vp9_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
-  vp9_build_2nd_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
-}
-
 static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
   int i;
   MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
   BLOCKD *blockd = xd->block;
+  int which_mv = 0;
+  const int use_second_ref = mbmi->second_ref_frame > 0;
 
   if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
-    blockd[ 0].bmi = xd->mode_info_context->bmi[ 0];
-    blockd[ 2].bmi = xd->mode_info_context->bmi[ 2];
-    blockd[ 8].bmi = xd->mode_info_context->bmi[ 8];
-    blockd[10].bmi = xd->mode_info_context->bmi[10];
+    for (i = 0; i < 16; i += 8) {
+      BLOCKD *d0 = &blockd[i];
+      BLOCKD *d1 = &blockd[i + 2];
 
-    if (mbmi->need_to_clamp_mvs) {
-      clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv[0].as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv[0].as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv[0].as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[0].as_mv, xd);
-      if (mbmi->second_ref_frame > 0) {
-        clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv[1].as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv[1].as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv[1].as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[1].as_mv, xd);
+      blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
+      blockd[i + 2].bmi = xd->mode_info_context->bmi[i + 2];
+
+      for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+        if (mbmi->need_to_clamp_mvs) {
+          clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[which_mv].as_mv, xd);
+          clamp_mv_to_umv_border(&blockd[i + 2].bmi.as_mv[which_mv].as_mv, xd);
+        }
+
+        /* TODO(jkoleszar): Enabling this for EIGHTTAP_SMOOTH changes the
+         * result slightly, for reasons that are not immediately obvious to me.
+         * It probably makes sense to enable this for all filter types to be
+         * consistent with the way we do 8x4 below. Leaving disabled for now.
+         */
+        if (mbmi->interp_filter != EIGHTTAP_SMOOTH) {
+          build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16,
+                                    which_mv, &xd->subpix);
+        } else {
+          uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
+          uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
+
+          vp9_build_inter_predictor(*base_pre0 + d0->pre,
+                                    d0->pre_stride,
+                                    d0->predictor, 16,
+                                    &d0->bmi.as_mv[which_mv],
+                                    &xd->scale_factor[which_mv],
+                                    8, 8, which_mv, &xd->subpix);
+          vp9_build_inter_predictor(*base_pre1 + d1->pre,
+                                    d1->pre_stride,
+                                    d1->predictor, 16,
+                                    &d1->bmi.as_mv[which_mv],
+                                    &xd->scale_factor[which_mv],
+                                    8, 8, which_mv, &xd->subpix);
+        }
       }
     }
-
-
-    vp9_build_inter_predictors4b(xd, &blockd[ 0], 16);
-    vp9_build_inter_predictors4b(xd, &blockd[ 2], 16);
-    vp9_build_inter_predictors4b(xd, &blockd[ 8], 16);
-    vp9_build_inter_predictors4b(xd, &blockd[10], 16);
-
-    if (mbmi->second_ref_frame > 0) {
-      vp9_build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
-      vp9_build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
-      vp9_build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
-      vp9_build_2nd_inter_predictors4b(xd, &blockd[10], 16);
-    }
   } else {
     for (i = 0; i < 16; i += 2) {
       BLOCKD *d0 = &blockd[i];
@@ -749,25 +654,9 @@
       blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
       blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
 
-      if (mbmi->need_to_clamp_mvs) {
-        clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[0].as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[0].as_mv, xd);
-        if (mbmi->second_ref_frame > 0) {
-          clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[1].as_mv, xd);
-          clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[1].as_mv, xd);
-        }
-      }
-
-      if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
-        build_inter_predictors2b(xd, d0, 16);
-      else {
-        vp9_build_inter_predictors_b(d0, 16, &xd->subpix);
-        vp9_build_inter_predictors_b(d1, 16, &xd->subpix);
-      }
-
-      if (mbmi->second_ref_frame > 0) {
-        vp9_build_2nd_inter_predictors_b(d0, 16, &xd->subpix);
-        vp9_build_2nd_inter_predictors_b(d1, 16, &xd->subpix);
+      for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+        build_2x1_inter_predictor(d0, d1, xd->scale_factor, 4, 16,
+                                  which_mv, &xd->subpix);
       }
     }
   }
@@ -776,16 +665,9 @@
     BLOCKD *d0 = &blockd[i];
     BLOCKD *d1 = &blockd[i + 1];
 
-    if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
-      build_inter_predictors2b(xd, d0, 8);
-    else {
-      vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
-      vp9_build_inter_predictors_b(d1, 8, &xd->subpix);
-    }
-
-    if (mbmi->second_ref_frame > 0) {
-      vp9_build_2nd_inter_predictors_b(d0, 8, &xd->subpix);
-      vp9_build_2nd_inter_predictors_b(d1, 8, &xd->subpix);
+    for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+      build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8,
+                                which_mv, &xd->subpix);
     }
   }
 }
@@ -882,22 +764,25 @@
   }
 }
 
+void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
+                                        uint8_t *dst_y,
+                                        uint8_t *dst_u,
+                                        uint8_t *dst_v,
+                                        int dst_ystride,
+                                        int dst_uvstride) {
+  vp9_build_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
+  vp9_build_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
+}
+
+
 void vp9_build_inter_predictors_mb(MACROBLOCKD *xd) {
   if (xd->mode_info_context->mbmi.mode != SPLITMV) {
-    vp9_build_1st_inter16x16_predictors_mb(xd, xd->predictor,
-                                           &xd->predictor[256],
-                                           &xd->predictor[320], 16, 8);
+    vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
+                                       &xd->predictor[256],
+                                       &xd->predictor[320], 16, 8);
 
-    if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-      /* 256 = offset of U plane in Y+U+V buffer;
-       * 320 = offset of V plane in Y+U+V buffer.
-       * (256=16x16, 320=16x16+8x8). */
-      vp9_build_2nd_inter16x16_predictors_mb(xd, xd->predictor,
-                                             &xd->predictor[256],
-                                             &xd->predictor[320], 16, 8);
-    }
 #if CONFIG_COMP_INTERINTRA_PRED
-    else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+    if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
       vp9_build_interintra_16x16_predictors_mb(xd, xd->predictor,
                                                &xd->predictor[256],
                                                &xd->predictor[320], 16, 8);
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 903bd2e..43f7164 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -16,38 +16,21 @@
 
 struct subpix_fn_table;
 
-extern void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
-                                                    uint8_t *dst_y,
-                                                    int dst_ystride,
-                                                    int clamp_mvs);
+extern void vp9_build_inter16x16_predictors_mby(MACROBLOCKD *xd,
+                                                uint8_t *dst_y,
+                                                int dst_ystride);
 
-extern void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
-                                                     uint8_t *dst_u,
-                                                     uint8_t *dst_v,
-                                                     int dst_uvstride);
+extern void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
+                                                 uint8_t *dst_u,
+                                                 uint8_t *dst_v,
+                                                 int dst_uvstride);
 
-extern void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
-                                                   uint8_t *dst_y,
-                                                   uint8_t *dst_u,
-                                                   uint8_t *dst_v,
-                                                   int dst_ystride,
-                                                   int dst_uvstride);
-
-extern void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
-                                                    uint8_t *dst_y,
-                                                    int dst_ystride);
-
-extern void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
-                                                     uint8_t *dst_u,
-                                                     uint8_t *dst_v,
-                                                     int dst_uvstride);
-
-extern void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
-                                                   uint8_t *dst_y,
-                                                   uint8_t *dst_u,
-                                                   uint8_t *dst_v,
-                                                   int dst_ystride,
-                                                   int dst_uvstride);
+extern void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
+                                               uint8_t *dst_y,
+                                               uint8_t *dst_u,
+                                               uint8_t *dst_v,
+                                               int dst_ystride,
+                                               int dst_uvstride);
 
 extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
                                                uint8_t *dst_y,
@@ -65,22 +48,24 @@
 
 extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);
 
-extern void vp9_build_inter_predictors_b(BLOCKD *d, int pitch,
-                                         struct subpix_fn_table *sppf);
-
-extern void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
-                                             struct subpix_fn_table *sppf);
-
-extern void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d,
-                                         int pitch);
-
-extern void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
-                                             BLOCKD *d, int pitch);
-
 extern void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
 
 extern void vp9_setup_interp_filters(MACROBLOCKD *xd,
                                      INTERPOLATIONFILTERTYPE filter,
                                      VP9_COMMON *cm);
 
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+                               uint8_t *dst, int dst_stride,
+                               const int_mv *mv_q3,
+                               const struct scale_factors *scale,
+                               int w, int h, int do_avg,
+                               const struct subpix_fn_table *subpix);
+
+void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
+                                  uint8_t *dst, int dst_stride,
+                                  const int_mv *fullpel_mv_q3,
+                                  const int_mv *frac_mv_q4,
+                                  const struct scale_factors *scale,
+                                  int w, int h, int do_avg,
+                                  const struct subpix_fn_table *subpix);
 #endif  // VP9_COMMON_VP9_RECONINTER_H_
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index d42bccd..830b6fd 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -177,23 +177,14 @@
                                          xd->dst.y_stride,
                                          xd->dst.uv_stride);
     } else {
-      vp9_build_1st_inter16x16_predictors_mb(xd,
-                                             xd->dst.y_buffer,
-                                             xd->dst.u_buffer,
-                                             xd->dst.v_buffer,
-                                             xd->dst.y_stride,
-                                             xd->dst.uv_stride);
-
-      if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-        vp9_build_2nd_inter16x16_predictors_mb(xd,
-                                               xd->dst.y_buffer,
-                                               xd->dst.u_buffer,
-                                               xd->dst.v_buffer,
-                                               xd->dst.y_stride,
-                                               xd->dst.uv_stride);
-      }
+      vp9_build_inter16x16_predictors_mb(xd,
+                                         xd->dst.y_buffer,
+                                         xd->dst.u_buffer,
+                                         xd->dst.v_buffer,
+                                         xd->dst.y_stride,
+                                         xd->dst.uv_stride);
 #if CONFIG_COMP_INTERINTRA_PRED
-      else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+      if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
         vp9_build_interintra_16x16_predictors_mb(xd,
                                                  xd->dst.y_buffer,
                                                  xd->dst.u_buffer,
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 8805557..fec5a7c 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2125,22 +2125,14 @@
         mbmi->mb_skip_coeff = 0;
 
     } else {
-      vp9_build_1st_inter16x16_predictors_mb(xd,
-                                             xd->dst.y_buffer,
-                                             xd->dst.u_buffer,
-                                             xd->dst.v_buffer,
-                                             xd->dst.y_stride,
-                                             xd->dst.uv_stride);
-      if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-        vp9_build_2nd_inter16x16_predictors_mb(xd,
-                                               xd->dst.y_buffer,
-                                               xd->dst.u_buffer,
-                                               xd->dst.v_buffer,
-                                               xd->dst.y_stride,
-                                               xd->dst.uv_stride);
-      }
+      vp9_build_inter16x16_predictors_mb(xd,
+                                         xd->dst.y_buffer,
+                                         xd->dst.u_buffer,
+                                         xd->dst.v_buffer,
+                                         xd->dst.y_stride,
+                                         xd->dst.uv_stride);
 #if CONFIG_COMP_INTERINTRA_PRED
-      else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
+      if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
         vp9_build_interintra_16x16_predictors_mb(xd,
                                                  xd->dst.y_buffer,
                                                  xd->dst.u_buffer,
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 9ff5dd9..61516dd 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -713,7 +713,7 @@
   MACROBLOCKD *xd = &x->e_mbd;
   BLOCK *b = &x->block[0];
 
-  vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
+  vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
 
   vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
 
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 218a47a..40823f6 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -72,7 +72,7 @@
   }
 
   vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
-  vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
+  vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
   best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
                           xd->predictor, 16, INT_MAX);
 
@@ -292,6 +292,9 @@
   int_mv arf_top_mv, gld_top_mv;
   MODE_INFO mi_local;
 
+  // Make sure the mi context starts in a consistent state.
+  memset(&mi_local, 0, sizeof(mi_local));
+
   // Set up limit values for motion vectors to prevent them extending outside the UMV borders
   arf_top_mv.as_int = 0;
   gld_top_mv.as_int = 0;
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index be091ee..d26b5ae 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2115,9 +2115,22 @@
       BLOCK *be = &x->block[i];
       int thisdistortion;
 
-      vp9_build_inter_predictors_b(bd, 16, &xd->subpix);
-      if (xd->mode_info_context->mbmi.second_ref_frame > 0)
-        vp9_build_2nd_inter_predictors_b(bd, 16, &xd->subpix);
+      vp9_build_inter_predictor(*(bd->base_pre) + bd->pre,
+                                bd->pre_stride,
+                                bd->predictor, 16,
+                                &bd->bmi.as_mv[0],
+                                &xd->scale_factor[0],
+                                4, 4, 0 /* no avg */, &xd->subpix);
+
+      if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
+        vp9_build_inter_predictor(*(bd->base_second_pre) + bd->pre,
+                                  bd->pre_stride,
+                                  bd->predictor, 16,
+                                  &bd->bmi.as_mv[1],
+                                  &xd->scale_factor[1],
+                                  4, 4, 1 /* avg */, &xd->subpix);
+      }
+
       vp9_subtract_b(be, bd, 16);
       x->fwd_txm4x4(be->src_diff, be->coeff, 32);
       x->quantize_b_4x4(be, bd);
@@ -2159,14 +2172,25 @@
     int ib = vp9_i8x8_block[i];
 
     if (labels[ib] == which_label) {
+      const int use_second_ref =
+          xd->mode_info_context->mbmi.second_ref_frame > 0;
+      int which_mv;
       int idx = (ib & 8) + ((ib & 2) << 1);
       BLOCKD *bd = &xd->block[ib], *bd2 = &xd->block[idx];
       BLOCK *be = &x->block[ib], *be2 = &x->block[idx];
       int thisdistortion;
 
-      vp9_build_inter_predictors4b(xd, bd, 16);
-      if (xd->mode_info_context->mbmi.second_ref_frame > 0)
-        vp9_build_2nd_inter_predictors4b(xd, bd, 16);
+      for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
+        uint8_t **base_pre = which_mv ? bd->base_second_pre : bd->base_pre;
+
+        vp9_build_inter_predictor(*base_pre + bd->pre,
+                                  bd->pre_stride,
+                                  bd->predictor, 16,
+                                  &bd->bmi.as_mv[which_mv],
+                                  &xd->scale_factor[which_mv],
+                                  8, 8, which_mv, &xd->subpix);
+      }
+
       vp9_subtract_4b_c(be, bd, 16);
 
       if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
@@ -3482,19 +3506,19 @@
         unsigned int sse, var;
         int tmp_rate_y, tmp_rate_u, tmp_rate_v;
         int tmp_dist_y, tmp_dist_u, tmp_dist_v;
-        vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
-        if (is_comp_pred)
-          vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
+        // TODO(jkoleszar): these 2 y/uv should be replaced with one call to
+        // vp9_build_interintra_16x16_predictors_mb().
+        vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
+
 #if CONFIG_COMP_INTERINTRA_PRED
         if (is_comp_interintra_pred) {
           vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
         }
 #endif
-        vp9_build_1st_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
-                                                 xd->predictor + 320, 8);
-        if (is_comp_pred)
-          vp9_build_2nd_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
-                                                   xd->predictor + 320, 8);
+
+        vp9_build_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
+                                             xd->predictor + 320, 8);
+
 #if CONFIG_COMP_INTERINTRA_PRED
         if (is_comp_interintra_pred) {
           vp9_build_interintra_16x16_predictors_mbuv(xd, xd->predictor + 256,
@@ -3598,19 +3622,16 @@
                                          xd->dst.y_stride,
                                          xd->dst.uv_stride);
     } else {
-      vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
-      if (is_comp_pred)
-        vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
+      // TODO(jkoleszar): These y/uv fns can be replaced with their mb
+      // equivalent
+      vp9_build_inter16x16_predictors_mby(xd, xd->predictor, 16);
 #if CONFIG_COMP_INTERINTRA_PRED
       if (is_comp_interintra_pred) {
         vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
       }
 #endif
-      vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
-                                               &xd->predictor[320], 8);
-      if (is_comp_pred)
-        vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
-                                                 &xd->predictor[320], 8);
+      vp9_build_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
+                                           &xd->predictor[320], 8);
 #if CONFIG_COMP_INTERINTRA_PRED
       if (is_comp_interintra_pred) {
         vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index d016e52..39c02e6 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -12,6 +12,7 @@
 #include <limits.h>
 
 #include "vp9/common/vp9_onyxc_int.h"
+#include "vp9/common/vp9_reconinter.h"
 #include "vp9/encoder/vp9_onyx_int.h"
 #include "vp9/common/vp9_systemdependent.h"
 #include "vp9/encoder/vp9_quantize.h"
@@ -42,40 +43,35 @@
                                             int mv_row,
                                             int mv_col,
                                             uint8_t *pred) {
-  int offset;
-  uint8_t *yptr, *uptr, *vptr;
-  int omv_row, omv_col;
+  const int which_mv = 0;
+  int_mv subpel_mv;
+  int_mv fullpel_mv;
 
-  // Y
-  yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
+  subpel_mv.as_mv.row = mv_row;
+  subpel_mv.as_mv.col = mv_col;
+  // TODO(jkoleszar): Make this rounding consistent with the rest of the code
+  fullpel_mv.as_mv.row = (mv_row >> 1) & ~7;
+  fullpel_mv.as_mv.col = (mv_col >> 1) & ~7;
 
-  xd->subpix.predict[!!(mv_col & 7)][!!(mv_row & 7)][0](
-      yptr, stride, &pred[0], 16,
-      xd->subpix.filter_x[(mv_col & 7) << 1], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(mv_row & 7) << 1], xd->subpix.y_step_q4,
-      16, 16);
+  vp9_build_inter_predictor(y_mb_ptr, stride,
+                            &pred[0], 16,
+                            &subpel_mv,
+                            &xd->scale_factor[which_mv],
+                            16, 16, which_mv, &xd->subpix);
 
-  // U & V
-  omv_row = mv_row;
-  omv_col = mv_col;
-  mv_row >>= 1;
-  mv_col >>= 1;
   stride = (stride + 1) >> 1;
-  offset = (mv_row >> 3) * stride + (mv_col >> 3);
-  uptr = u_mb_ptr + offset;
-  vptr = v_mb_ptr + offset;
 
-  xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][0](
-      uptr, stride, &pred[256], 8,
-      xd->subpix.filter_x[(omv_col & 15)], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(omv_row & 15)], xd->subpix.y_step_q4,
-      8, 8);
+  vp9_build_inter_predictor_q4(u_mb_ptr, stride,
+                               &pred[256], 8,
+                               &fullpel_mv, &subpel_mv,
+                               &xd->scale_factor_uv[which_mv],
+                               8, 8, which_mv, &xd->subpix);
 
-  xd->subpix.predict[!!(omv_col & 15)][!!(omv_row & 15)][0](
-      vptr, stride, &pred[320], 8,
-      xd->subpix.filter_x[(omv_col & 15)], xd->subpix.x_step_q4,
-      xd->subpix.filter_y[(omv_row & 15)], xd->subpix.y_step_q4,
-      8, 8);
+  vp9_build_inter_predictor_q4(v_mb_ptr, stride,
+                               &pred[320], 8,
+                               &fullpel_mv, &subpel_mv,
+                               &xd->scale_factor_uv[which_mv],
+                               8, 8, which_mv, &xd->subpix);
 }
 
 void vp9_temporal_filter_apply_c(uint8_t *frame1,