Fix frame scaling prediction

Use higher precision offsets for more accurate predictor
generation when references are at a different scale from
the coded frame.

Change-Id: I4c2c0ec67fa4824273cb3bd072211f41ac7802e8
diff --git a/av1/common/convolve.c b/av1/common/convolve.c
index 4c0fd4b..b8cc7db 100644
--- a/av1/common/convolve.c
+++ b/av1/common/convolve.c
@@ -55,6 +55,39 @@
   }
 }
 
+void av1_convolve_horiz_scale(const uint8_t *src, int src_stride, uint8_t *dst,
+                              int dst_stride, int w, int h,
+                              const InterpFilterParams filter_params,
+                              const int subpel_x_qn, int x_step_qn,
+                              ConvolveParams *conv_params) {
+  int x, y;
+  int filter_size = filter_params.taps;
+  assert(conv_params->round == CONVOLVE_OPT_ROUND);
+  src -= filter_size / 2 - 1;
+  for (y = 0; y < h; ++y) {
+    int x_qn = subpel_x_qn;
+    for (x = 0; x < w; ++x) {
+      const uint8_t *const src_x = &src[x_qn >> SCALE_SUBPEL_BITS];
+      const int x_filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS;
+      assert(x_filter_idx < SUBPEL_SHIFTS);
+      const int16_t *x_filter =
+          av1_get_interp_filter_subpel_kernel(filter_params, x_filter_idx);
+      int k, sum = 0;
+      for (k = 0; k < filter_size; ++k) sum += src_x[k] * x_filter[k];
+
+      sum = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+      if (conv_params->do_average)
+        dst[x] = ROUND_POWER_OF_TWO(dst[x] + sum, 1);
+      else
+        dst[x] = sum;
+
+      x_qn += x_step_qn;
+    }
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
 void av1_convolve_vert_c(const uint8_t *src, int src_stride, uint8_t *dst,
                          int dst_stride, int w, int h,
                          const InterpFilterParams filter_params,
@@ -87,6 +120,41 @@
   }
 }
 
+void av1_convolve_vert_scale(const uint8_t *src, int src_stride, uint8_t *dst,
+                             int dst_stride, int w, int h,
+                             const InterpFilterParams filter_params,
+                             const int subpel_y_qn, int y_step_qn,
+                             ConvolveParams *conv_params) {
+  int x, y;
+  int filter_size = filter_params.taps;
+  assert(conv_params->round == CONVOLVE_OPT_ROUND);
+  src -= src_stride * (filter_size / 2 - 1);
+  for (x = 0; x < w; ++x) {
+    int y_qn = subpel_y_qn;
+    for (y = 0; y < h; ++y) {
+      const uint8_t *const src_y =
+          &src[(y_qn >> SCALE_SUBPEL_BITS) * src_stride];
+      const int y_filter_idx = (y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS;
+      assert(y_filter_idx < SUBPEL_SHIFTS);
+      const int16_t *y_filter =
+          av1_get_interp_filter_subpel_kernel(filter_params, y_filter_idx);
+      int k, sum = 0;
+      for (k = 0; k < filter_size; ++k)
+        sum += src_y[k * src_stride] * y_filter[k];
+
+      sum = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+      if (conv_params->do_average)
+        dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] + sum, 1);
+      else
+        dst[y * dst_stride] = sum;
+
+      y_qn += y_step_qn;
+    }
+    ++src;
+    ++dst;
+  }
+}
+
 static void convolve_copy(const uint8_t *src, int src_stride, uint8_t *dst,
                           int dst_stride, int w, int h,
                           ConvolveParams *conv_params) {
@@ -152,6 +220,28 @@
   }
 }
 
+void av1_convolve_horiz_facade_scale(const uint8_t *src, int src_stride,
+                                     uint8_t *dst, int dst_stride, int w, int h,
+                                     const InterpFilterParams filter_params,
+                                     const int subpel_x_qn, int x_step_qn,
+                                     ConvolveParams *conv_params) {
+  assert(conv_params->round == CONVOLVE_OPT_ROUND);
+  if (filter_params.taps == SUBPEL_TAPS) {
+    const int16_t *filter_x = av1_get_interp_filter_subpel_kernel(
+        filter_params, subpel_x_qn >> SCALE_EXTRA_BITS);
+    if (conv_params->do_average == 0)
+      aom_convolve8_horiz_scale(src, src_stride, dst, dst_stride, filter_x,
+                                subpel_x_qn, x_step_qn, NULL, 0, -1, w, h);
+    else
+      aom_convolve8_avg_horiz_scale(src, src_stride, dst, dst_stride, filter_x,
+                                    subpel_x_qn, x_step_qn, NULL, 0, -1, w, h);
+  } else {
+    av1_convolve_horiz_scale(src, src_stride, dst, dst_stride, w, h,
+                             filter_params, subpel_x_qn, x_step_qn,
+                             conv_params);
+  }
+}
+
 void av1_convolve_vert_facade(const uint8_t *src, int src_stride, uint8_t *dst,
                               int dst_stride, int w, int h,
                               const InterpFilterParams filter_params,
@@ -196,6 +286,28 @@
   }
 }
 
+void av1_convolve_vert_facade_scale(const uint8_t *src, int src_stride,
+                                    uint8_t *dst, int dst_stride, int w, int h,
+                                    const InterpFilterParams filter_params,
+                                    const int subpel_y_qn, int y_step_qn,
+                                    ConvolveParams *conv_params) {
+  assert(conv_params->round == CONVOLVE_OPT_ROUND);
+  if (filter_params.taps == SUBPEL_TAPS) {
+    const int16_t *filter_y = av1_get_interp_filter_subpel_kernel(
+        filter_params, subpel_y_qn >> SCALE_EXTRA_BITS);
+    if (conv_params->do_average == 0) {
+      aom_convolve8_vert_scale(src, src_stride, dst, dst_stride, NULL, 0, -1,
+                               filter_y, subpel_y_qn, y_step_qn, w, h);
+    } else {
+      aom_convolve8_avg_vert_scale(src, src_stride, dst, dst_stride, NULL, 0,
+                                   -1, filter_y, subpel_y_qn, y_step_qn, w, h);
+    }
+  } else {
+    av1_convolve_vert_scale(src, src_stride, dst, dst_stride, w, h,
+                            filter_params, subpel_y_qn, y_step_qn, conv_params);
+  }
+}
+
 #if CONFIG_CONVOLVE_ROUND
 void av1_convolve_rounding(const int32_t *src, int src_stride, uint8_t *dst,
                            int dst_stride, int w, int h, int bits) {
@@ -587,8 +699,8 @@
                             ConvolveParams *conv_params,
                             ConvolveFunc convolve_horiz,
                             ConvolveFunc convolve_vert) {
-  int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
-  int ignore_vert = y_step_q4 == 16 && subpel_y_q4 == 0;
+  int ignore_horiz = x_step_q4 == SUBPEL_SHIFTS && subpel_x_q4 == 0;
+  int ignore_vert = y_step_q4 == SUBPEL_SHIFTS && subpel_y_q4 == 0;
 #if CONFIG_DUAL_FILTER
   InterpFilterParams filter_params_x =
       av1_get_interp_filter_params(interp_filter[1 + 2 * conv_params->ref]);
@@ -701,6 +813,135 @@
   }
 }
 
+static void convolve_scale_helper(const uint8_t *src, int src_stride,
+                                  uint8_t *dst, int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                                  const InterpFilter *interp_filter,
+#else
+                                  const InterpFilter interp_filter,
+#endif
+                                  const int subpel_x_qn, int x_step_qn,
+                                  const int subpel_y_qn, int y_step_qn,
+                                  ConvolveParams *conv_params,
+                                  ConvolveFunc convolve_horiz,
+                                  ConvolveFunc convolve_vert) {
+  int ignore_horiz = x_step_qn == SCALE_SUBPEL_SHIFTS && subpel_x_qn == 0;
+  int ignore_vert = y_step_qn == SCALE_SUBPEL_SHIFTS && subpel_y_qn == 0;
+#if CONFIG_DUAL_FILTER
+  InterpFilterParams filter_params_x =
+      av1_get_interp_filter_params(interp_filter[1 + 2 * conv_params->ref]);
+  InterpFilterParams filter_params_y =
+      av1_get_interp_filter_params(interp_filter[0 + 2 * conv_params->ref]);
+  InterpFilterParams filter_params;
+#else
+  InterpFilterParams filter_params =
+      av1_get_interp_filter_params(interp_filter);
+#endif
+  assert(conv_params->round == CONVOLVE_OPT_ROUND);
+
+  assert(w <= MAX_BLOCK_WIDTH);
+  assert(h <= MAX_BLOCK_HEIGHT);
+  assert(y_step_qn <= (MAX_STEP << SCALE_EXTRA_BITS));
+  assert(x_step_qn <= (MAX_STEP << SCALE_EXTRA_BITS));
+
+  if (ignore_horiz && ignore_vert) {
+    convolve_copy(src, src_stride, dst, dst_stride, w, h, conv_params);
+  } else if (ignore_vert) {
+#if CONFIG_DUAL_FILTER
+    filter_params = filter_params_x;
+#endif
+    assert(filter_params.taps <= MAX_FILTER_TAP);
+    convolve_horiz(src, src_stride, dst, dst_stride, w, h, filter_params,
+                   subpel_x_qn, x_step_qn, conv_params);
+  } else if (ignore_horiz) {
+#if CONFIG_DUAL_FILTER
+    filter_params = filter_params_y;
+#endif
+    assert(filter_params.taps <= MAX_FILTER_TAP);
+    convolve_vert(src, src_stride, dst, dst_stride, w, h, filter_params,
+                  subpel_y_qn, y_step_qn, conv_params);
+  } else {
+    // temp's size is set to a 256 aligned value to facilitate SIMD
+    // implementation. The value is greater than (maximum possible intermediate
+    // height or width) * MAX_SB_SIZE
+    DECLARE_ALIGNED(16, uint8_t,
+                    temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
+    int max_intermediate_size = ((MAX_SB_SIZE * 2 + 16) + 16);
+    int filter_size;
+#if CONFIG_DUAL_FILTER && USE_EXTRA_FILTER
+    if (interp_filter[0 + 2 * conv_params->ref] == MULTITAP_SHARP &&
+        interp_filter[1 + 2 * conv_params->ref] == MULTITAP_SHARP) {
+      // Avoid two directions both using 12-tap filter.
+      // This will reduce hardware implementation cost.
+      filter_params_y = av1_get_interp_filter_params(EIGHTTAP_SHARP);
+    }
+
+    // we do filter with fewer taps first to reduce hardware implementation
+    // complexity
+    if (filter_params_y.taps < filter_params_x.taps) {
+      int intermediate_width;
+      int temp_stride = max_intermediate_size;
+      ConvolveParams temp_conv_params;
+      temp_conv_params.ref = 0;
+      temp_conv_params.do_average = 0;
+      temp_conv_params.round = CONVOLVE_OPT_ROUND;
+      filter_params = filter_params_y;
+      filter_size = filter_params_x.taps;
+      intermediate_width =
+          (((w - 1) * x_step_qn + subpel_x_qn) >> SCALE_SUBPEL_BITS) +
+          filter_size;
+      assert(intermediate_width <= max_intermediate_size);
+
+      assert(filter_params.taps <= MAX_FILTER_TAP);
+
+      convolve_vert(src - (filter_size / 2 - 1), src_stride, temp, temp_stride,
+                    intermediate_width, h, filter_params, subpel_y_qn,
+                    y_step_qn, &temp_conv_params);
+
+      filter_params = filter_params_x;
+      assert(filter_params.taps <= MAX_FILTER_TAP);
+      convolve_horiz(temp + (filter_size / 2 - 1), temp_stride, dst, dst_stride,
+                     w, h, filter_params, subpel_x_qn, x_step_qn, conv_params);
+    } else {
+#endif  // CONFIG_DUAL_FILTER && USE_EXTRA_FILTER
+      int intermediate_height;
+      int temp_stride = MAX_SB_SIZE;
+      ConvolveParams temp_conv_params;
+      temp_conv_params.ref = 0;
+      temp_conv_params.do_average = 0;
+      temp_conv_params.round = CONVOLVE_OPT_ROUND;
+#if CONFIG_DUAL_FILTER
+      filter_params = filter_params_x;
+      filter_size = filter_params_y.taps;
+#else
+    filter_size = filter_params.taps;
+#endif
+      intermediate_height =
+          (((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS) +
+          filter_size;
+      assert(intermediate_height <= max_intermediate_size);
+      (void)max_intermediate_size;
+
+      assert(filter_params.taps <= MAX_FILTER_TAP);
+
+      convolve_horiz(src - src_stride * (filter_size / 2 - 1), src_stride, temp,
+                     temp_stride, w, intermediate_height, filter_params,
+                     subpel_x_qn, x_step_qn, &temp_conv_params);
+
+#if CONFIG_DUAL_FILTER
+      filter_params = filter_params_y;
+#endif
+      assert(filter_params.taps <= MAX_FILTER_TAP);
+
+      convolve_vert(temp + temp_stride * (filter_size / 2 - 1), temp_stride,
+                    dst, dst_stride, w, h, filter_params, subpel_y_qn,
+                    y_step_qn, conv_params);
+#if CONFIG_DUAL_FILTER && USE_EXTRA_FILTER
+    }
+#endif  // CONFIG_DUAL_FILTER && USE_EXTRA_FILTER
+  }
+}
+
 void av1_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
                   int dst_stride, int w, int h,
 #if CONFIG_DUAL_FILTER
@@ -729,6 +970,22 @@
                   av1_convolve_horiz_facade_c, av1_convolve_vert_facade_c);
 }
 
+void av1_convolve_scale(const uint8_t *src, int src_stride, uint8_t *dst,
+                        int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                        const InterpFilter *interp_filter,
+#else
+                        const InterpFilter interp_filter,
+#endif
+                        const int subpel_x_q4, int x_step_q4,
+                        const int subpel_y_q4, int y_step_q4,
+                        ConvolveParams *conv_params) {
+  convolve_scale_helper(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                        subpel_x_q4, x_step_q4, subpel_y_q4, y_step_q4,
+                        conv_params, av1_convolve_horiz_facade_scale,
+                        av1_convolve_vert_facade_scale);
+}
+
 void av1_lowbd_convolve_init_c(void) {
   // A placeholder for SIMD initialization
   return;
@@ -899,8 +1156,8 @@
                          int bd) {
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
-  int ignore_horiz = x_step_q4 == 16 && subpel_x_q4 == 0;
-  int ignore_vert = y_step_q4 == 16 && subpel_y_q4 == 0;
+  int ignore_horiz = x_step_q4 == SUBPEL_SHIFTS && subpel_x_q4 == 0;
+  int ignore_vert = y_step_q4 == SUBPEL_SHIFTS && subpel_y_q4 == 0;
 
   assert(w <= MAX_BLOCK_WIDTH);
   assert(h <= MAX_BLOCK_HEIGHT);
diff --git a/av1/common/convolve.h b/av1/common/convolve.h
index 6888107..2df310d 100644
--- a/av1/common/convolve.h
+++ b/av1/common/convolve.h
@@ -114,6 +114,16 @@
                     const int subpel_x, int xstep, const int subpel_y,
                     int ystep, ConvolveParams *conv_params);
 
+void av1_convolve_scale(const uint8_t *src, int src_stride, uint8_t *dst,
+                        int dst_stride, int w, int h,
+#if CONFIG_DUAL_FILTER
+                        const InterpFilter *interp_filter,
+#else
+                        const InterpFilter interp_filter,
+#endif
+                        const int subpel_x, int xstep, const int subpel_y,
+                        int ystep, ConvolveParams *conv_params);
+
 #if CONFIG_HIGHBITDEPTH
 void av1_highbd_convolve(const uint8_t *src, int src_stride, uint8_t *dst,
                          int dst_stride, int w, int h,
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index 363e291..92486b4 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -827,6 +827,8 @@
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
   MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
+  mv.col += SCALE_EXTRA_OFF;
+  mv.row += SCALE_EXTRA_OFF;
   const int subpel_x = mv.col & SCALE_SUBPEL_MASK;
   const int subpel_y = mv.row & SCALE_SUBPEL_MASK;
   ConvolveParams conv_params = get_conv_params(ref, ref, plane);
@@ -865,6 +867,8 @@
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
   MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
+  mv.col += SCALE_EXTRA_OFF;
+  mv.row += SCALE_EXTRA_OFF;
   const int subpel_x = mv.col & SCALE_SUBPEL_MASK;
   const int subpel_y = mv.row & SCALE_SUBPEL_MASK;
 
@@ -1039,6 +1043,8 @@
             orig_pos_x += mv.col * (1 << (1 - ssx));
             int pos_y = sf->scale_value_y(orig_pos_y, sf);
             int pos_x = sf->scale_value_x(orig_pos_x, sf);
+            pos_x += SCALE_EXTRA_OFF;
+            pos_y += SCALE_EXTRA_OFF;
 
             const int top = -((AOM_INTERP_EXTEND + bh) << SCALE_SUBPEL_BITS);
             const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
@@ -1160,6 +1166,8 @@
         orig_pos_x += mv.col * (1 << (1 - ssx));
         int pos_y = sf->scale_value_y(orig_pos_y, sf);
         int pos_x = sf->scale_value_x(orig_pos_x, sf);
+        pos_x += SCALE_EXTRA_OFF;
+        pos_y += SCALE_EXTRA_OFF;
 
         // Clamp against the reference frame borders, with enough extension
         // that we don't force the reference block to be partially onscreen.
@@ -3014,6 +3022,8 @@
     orig_pos_x += mv.col * (1 << (1 - ssx));
     int pos_y = sf->scale_value_y(orig_pos_y, sf);
     int pos_x = sf->scale_value_x(orig_pos_x, sf);
+    pos_x += SCALE_EXTRA_OFF;
+    pos_y += SCALE_EXTRA_OFF;
 
     const int top = -((AOM_INTERP_EXTEND + bh) << SCALE_SUBPEL_BITS);
     const int bottom = (pre_buf->height + AOM_INTERP_EXTEND)
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index ff053e1..5f33df8 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -66,10 +66,8 @@
   if (has_scale(xs, ys)) {
     // TODO(afergs, debargha): Use a different scale convolve function
     // that uses higher precision for subpel_x, subpel_y, xs, ys
-    av1_convolve_c(src, src_stride, dst, dst_stride, w, h, interp_filter,
-                   subpel_x >> SCALE_EXTRA_BITS, xs >> SCALE_EXTRA_BITS,
-                   subpel_y >> SCALE_EXTRA_BITS, ys >> SCALE_EXTRA_BITS,
-                   conv_params);
+    av1_convolve_scale(src, src_stride, dst, dst_stride, w, h, interp_filter,
+                       subpel_x, xs, subpel_y, ys, conv_params);
   } else {
     subpel_x >>= SCALE_EXTRA_BITS;
     subpel_y >>= SCALE_EXTRA_BITS;
diff --git a/av1/common/resize.c b/av1/common/resize.c
index d0bdfc8..8ddca0a 100644
--- a/av1/common/resize.c
+++ b/av1/common/resize.c
@@ -1055,8 +1055,13 @@
 }
 
 void av1_calculate_scaled_size(int *width, int *height, int num) {
-  *width = *width * num / SCALE_DENOMINATOR;
-  *height = *height * num / SCALE_DENOMINATOR;
+  if (num != SCALE_DENOMINATOR) {
+    *width = *width * num / SCALE_DENOMINATOR;
+    *height = *height * num / SCALE_DENOMINATOR;
+    // Make width and height even
+    *width += *width & 1;
+    *height += *height & 1;
+  }
 }
 
 #if CONFIG_FRAME_SUPERRES
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 6635b87..d8a8bcc 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -16,14 +16,20 @@
 
 // Note: Expect val to be in q4 precision
 static INLINE int scaled_x(int val, const struct scale_factors *sf) {
-  return (int)((int64_t)val * sf->x_scale_fp >>
-               (REF_SCALE_SHIFT - SCALE_EXTRA_BITS));
+  const int off =
+      (sf->x_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1));
+  const int64_t tval = (int64_t)val * sf->x_scale_fp + off;
+  return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval,
+                                           REF_SCALE_SHIFT - SCALE_EXTRA_BITS);
 }
 
 // Note: Expect val to be in q4 precision
 static INLINE int scaled_y(int val, const struct scale_factors *sf) {
-  return (int)((int64_t)val * sf->y_scale_fp >>
-               (REF_SCALE_SHIFT - SCALE_EXTRA_BITS));
+  const int off =
+      (sf->y_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1));
+  const int64_t tval = (int64_t)val * sf->y_scale_fp + off;
+  return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval,
+                                           REF_SCALE_SHIFT - SCALE_EXTRA_BITS);
 }
 
 // Note: Expect val to be in q4 precision
@@ -37,16 +43,24 @@
   // and use fixed point scaling factors in decoding and encoding routines.
   // Hardware implementations can calculate scale factor in device driver
   // and use multiplication and shifting on hardware instead of division.
-  return (other_size << REF_SCALE_SHIFT) / this_size;
+  return ((other_size << REF_SCALE_SHIFT) + this_size / 2) / this_size;
 }
 
-// Note: x and y are integer precision, mv is g4 precision.
+static int get_coarse_point_scale_factor(int other_size, int this_size) {
+  // Calculate scaling factor once for each reference frame
+  // and use fixed point scaling factors in decoding and encoding routines.
+  // Hardware implementations can calculate scale factor in device driver
+  // and use multiplication and shifting on hardware instead of division.
+  return ((other_size << SCALE_SUBPEL_BITS) + this_size / 2) / this_size;
+}
+
+// Note: x and y are integer precision, mvq4 is q4 precision.
 MV32 av1_scale_mv(const MV *mvq4, int x, int y,
                   const struct scale_factors *sf) {
-  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SCALE_SUBPEL_MASK;
-  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SCALE_SUBPEL_MASK;
-  const MV32 res = { scaled_y(mvq4->row, sf) + y_off_q4,
-                     scaled_x(mvq4->col, sf) + x_off_q4 };
+  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf);
+  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf);
+  const MV32 res = { scaled_y((y << SUBPEL_BITS) + mvq4->row, sf) - y_off_q4,
+                     scaled_x((x << SUBPEL_BITS) + mvq4->col, sf) - x_off_q4 };
   return res;
 }
 
@@ -66,8 +80,9 @@
 
   sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
   sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  sf->x_step_q4 = scaled_x(SUBPEL_SHIFTS, sf);
-  sf->y_step_q4 = scaled_y(SUBPEL_SHIFTS, sf);
+
+  sf->x_step_q4 = get_coarse_point_scale_factor(other_w, this_w);
+  sf->y_step_q4 = get_coarse_point_scale_factor(other_h, this_h);
 
   if (av1_is_scaled(sf)) {
     sf->scale_value_x = scaled_x;
diff --git a/av1/common/scale.h b/av1/common/scale.h
index e035075..3aa61eb 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -21,11 +21,6 @@
 
 #define SCALE_DENOMINATOR 16
 
-#define SCALE_SUBPEL_BITS 8
-#define SCALE_SUBPEL_SHIFTS (1 << SCALE_SUBPEL_BITS)
-#define SCALE_SUBPEL_MASK (SCALE_SUBPEL_SHIFTS - 1)
-#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)
-
 #define REF_SCALE_SHIFT 14
 #define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
 #define REF_INVALID_SCALE -1