Merge "Adds subsecond frame rates to webm"
diff --git a/test/variance_test.cc b/test/variance_test.cc
index e45d90f..78657d9 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -96,8 +96,8 @@
     }
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
-  *sse_ptr = sse;
-  return sse - (((int64_t) se * se) >> (l2w + l2h));
+  *sse_ptr = (uint32_t) sse;
+  return (unsigned int) (sse - (((int64_t) se * se) >> (l2w + l2h)));
 }
 
 static unsigned int subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
@@ -142,8 +142,8 @@
     }
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
-  *sse_ptr = sse;
-  return sse - (((int64_t) se * se) >> (l2w + l2h));
+  *sse_ptr = (unsigned int) sse;
+  return (unsigned int) (sse - (((int64_t) se * se) >> (l2w + l2h)));
 }
 
 typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
@@ -510,8 +510,8 @@
     }
   }
   RoundHighBitDepth(bit_depth, &se, &sse);
-  *sse_ptr = sse;
-  return sse - (((int64_t) se * se) >> (l2w + l2h));
+  *sse_ptr = (unsigned int) sse;
+  return (unsigned int) (sse - (((int64_t) se * se) >> (l2w + l2h)));
 }
 
 template<typename SubpelVarianceFunctionType>
diff --git a/vp9/common/arm/neon/vp9_reconintra_neon.c b/vp9/common/arm/neon/vp9_reconintra_neon.c
index 387439f..499c42a 100644
--- a/vp9/common/arm/neon/vp9_reconintra_neon.c
+++ b/vp9/common/arm/neon/vp9_reconintra_neon.c
@@ -163,40 +163,40 @@
 
 #if !HAVE_NEON_ASM
 
-void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   int i;
   uint32x2_t d0u32 = vdup_n_u32(0);
   (void)left;
 
   d0u32 = vld1_lane_u32((const uint32_t *)above, d0u32, 0);
-  for (i = 0; i < 4; i++, dst += y_stride)
+  for (i = 0; i < 4; i++, dst += stride)
     vst1_lane_u32((uint32_t *)dst, d0u32, 0);
 }
 
-void vp9_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x8_t d0u8 = vdup_n_u8(0);
   (void)left;
 
   d0u8 = vld1_u8(above);
-  for (i = 0; i < 8; i++, dst += y_stride)
+  for (i = 0; i < 8; i++, dst += stride)
     vst1_u8(dst, d0u8);
 }
 
-void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x16_t q0u8 = vdupq_n_u8(0);
   (void)left;
 
   q0u8 = vld1q_u8(above);
-  for (i = 0; i < 16; i++, dst += y_stride)
+  for (i = 0; i < 16; i++, dst += stride)
     vst1q_u8(dst, q0u8);
 }
 
-void vp9_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int i;
   uint8x16_t q0u8 = vdupq_n_u8(0);
@@ -205,13 +205,13 @@
 
   q0u8 = vld1q_u8(above);
   q1u8 = vld1q_u8(above + 16);
-  for (i = 0; i < 32; i++, dst += y_stride) {
+  for (i = 0; i < 32; i++, dst += stride) {
     vst1q_u8(dst, q0u8);
     vst1q_u8(dst + 16, q1u8);
   }
 }
 
-void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   uint8x8_t d0u8 = vdup_n_u8(0);
   uint32x2_t d1u32 = vdup_n_u32(0);
@@ -221,18 +221,18 @@
 
   d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0);
   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1);
   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2);
   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3);
   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
 }
 
-void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
   uint8x8_t d0u8 = vdup_n_u8(0);
   uint64x1_t d1u64 = vdup_n_u64(0);
@@ -242,30 +242,30 @@
 
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 0);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 1);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 2);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 3);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 4);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 5);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 6);
   vst1_u8(dst, d0u8);
-  dst += y_stride;
+  dst += stride;
   d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 7);
   vst1_u8(dst, d0u8);
 }
 
-void vp9_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int j;
   uint8x8_t d2u8 = vdup_n_u8(0);
@@ -278,32 +278,32 @@
   for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
     q0u8 = vdupq_lane_u8(d2u8, 0);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 1);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 2);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 3);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 4);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 5);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 6);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
     q0u8 = vdupq_lane_u8(d2u8, 7);
     vst1q_u8(dst, q0u8);
-    dst += y_stride;
+    dst += stride;
   }
 }
 
-void vp9_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint8x8_t d2u8 = vdup_n_u8(0);
@@ -318,40 +318,40 @@
       q0u8 = vdupq_lane_u8(d2u8, 0);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 1);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 2);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 3);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 4);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 5);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 6);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
       q0u8 = vdupq_lane_u8(d2u8, 7);
       vst1q_u8(dst, q0u8);
       vst1q_u8(dst + 16, q0u8);
-      dst += y_stride;
+      dst += stride;
     }
   }
 }
 
-void vp9_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int i;
   uint16x8_t q1u16, q3u16;
@@ -359,10 +359,10 @@
   uint8x8_t d0u8 = vdup_n_u8(0);
   uint32x2_t d2u32 = vdup_n_u32(0);
 
-  d0u8 = vdup_n_u8(above[-1]);
+  d0u8 = vld1_dup_u8(above - 1);
   d2u32 = vld1_lane_u32((const uint32_t *)above, d2u32, 0);
   q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8);
-  for (i = 0; i < 4; i++, dst += y_stride) {
+  for (i = 0; i < 4; i++, dst += stride) {
     q1u16 = vdupq_n_u16((uint16_t)left[i]);
     q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16),
                       vreinterpretq_s16_u16(q3u16));
@@ -371,7 +371,7 @@
   }
 }
 
-void vp9_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
   int j;
   uint16x8_t q0u16, q3u16, q10u16;
@@ -379,7 +379,7 @@
   uint16x4_t d20u16;
   uint8x8_t d0u8, d2u8, d30u8;
 
-  d0u8 = vdup_n_u8(above[-1]);
+  d0u8 = vld1_dup_u8(above - 1);
   d30u8 = vld1_u8(left);
   d2u8 = vld1_u8(above);
   q10u16 = vmovl_u8(d30u8);
@@ -391,29 +391,29 @@
                       vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += y_stride;
+    dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 1);
     q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
                       vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += y_stride;
+    dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 2);
     q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
                       vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += y_stride;
+    dst += stride;
     q0u16 = vdupq_lane_u16(d20u16, 3);
     q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16),
                       vreinterpretq_s16_u16(q0u16));
     d0u8 = vqmovun_s16(q0s16);
     vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += y_stride;
+    dst += stride;
   }
 }
 
-void vp9_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
@@ -422,7 +422,7 @@
   uint16x4_t d20u16;
   uint8x8_t d2u8, d3u8, d18u8, d22u8, d23u8;
 
-  q0u8 = vdupq_n_u8(above[-1]);
+  q0u8 = vld1q_dup_u8(above - 1);
   q1u8 = vld1q_u8(above);
   q2u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
   q3u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
@@ -447,10 +447,10 @@
       d23u8 = vqmovun_s16(q8s16);
       vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
       vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
-      dst += y_stride;
+      dst += stride;
       vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
       vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
-      dst += y_stride;
+      dst += stride;
 
       q0u16 = vdupq_lane_u16(d20u16, 2);
       q8u16 = vdupq_lane_u16(d20u16, 3);
@@ -468,15 +468,15 @@
       d23u8 = vqmovun_s16(q8s16);
       vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
       vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
-      dst += y_stride;
+      dst += stride;
       vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
       vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
-      dst += y_stride;
+      dst += stride;
     }
   }
 }
 
-void vp9_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride,
+void vp9_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left) {
   int j, k;
   uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
@@ -485,7 +485,7 @@
   uint16x4_t d6u16;
   uint8x8_t d0u8, d1u8, d2u8, d3u8, d26u8;
 
-  q0u8 = vdupq_n_u8(above[-1]);
+  q0u8 = vld1q_dup_u8(above - 1);
   q1u8 = vld1q_u8(above);
   q2u8 = vld1q_u8(above + 16);
   q8u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
@@ -514,7 +514,7 @@
       q1u8 = vcombine_u8(d2u8, d3u8);
       vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
       vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += y_stride;
+      dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 1);
       q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -533,7 +533,7 @@
       q1u8 = vcombine_u8(d2u8, d3u8);
       vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
       vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += y_stride;
+      dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 2);
       q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -552,7 +552,7 @@
       q1u8 = vcombine_u8(d2u8, d3u8);
       vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
       vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += y_stride;
+      dst += stride;
 
       q0u16 = vdupq_lane_u16(d6u16, 3);
       q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
@@ -571,7 +571,7 @@
       q1u8 = vcombine_u8(d2u8, d3u8);
       vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
       vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += y_stride;
+      dst += stride;
     }
   }
 }
diff --git a/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm b/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
index dc9856f..d4f6d9b 100644
--- a/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
+++ b/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
@@ -345,8 +345,7 @@
 |vp9_tm_predictor_8x8_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             d0, r12
+    vld1.8              {d0[]}, [r12]
 
     ; preload 8 left
     vld1.8              {d30}, [r3]
@@ -418,8 +417,7 @@
 |vp9_tm_predictor_16x16_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             q0, r12
+    vld1.8              {d0[]}, [r12]
 
     ; Load above 8 pixels
     vld1.8              {q1}, [r2]
@@ -429,7 +427,7 @@
 
     ; Compute above - ytop_left
     vsubl.u8            q2, d2, d0
-    vsubl.u8            q3, d3, d1
+    vsubl.u8            q3, d3, d0
 
     vmovl.u8            q10, d18
 
@@ -512,8 +510,7 @@
 |vp9_tm_predictor_32x32_neon| PROC
     ; Load ytop_left = above[-1];
     sub                 r12, r2, #1
-    ldrb                r12, [r12]
-    vdup.u8             q0, r12
+    vld1.8              {d0[]}, [r12]
 
     ; Load above 32 pixels
     vld1.8              {q1}, [r2]!
@@ -524,9 +521,9 @@
 
     ; Compute above - ytop_left
     vsubl.u8            q8, d2, d0
-    vsubl.u8            q9, d3, d1
+    vsubl.u8            q9, d3, d0
     vsubl.u8            q10, d4, d0
-    vsubl.u8            q11, d5, d1
+    vsubl.u8            q11, d5, d0
 
     vmovl.u8            q3, d26
 
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index 2fdf408..3c0f52e 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -2804,8 +2804,9 @@
 
   if (cpi->twopass.total_left_stats.coded_error != 0.0)
     fprintf(f, "%10u %dx%d %d %d %10d %10d %10d %10d"
-        "%10"PRId64" %10"PRId64" %10"PRId64" %10"PRId64" %10d "
-        "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf"
+       "%10"PRId64" %10"PRId64" %5d %5d %10"PRId64" "
+       "%10"PRId64" %10"PRId64" %10d "
+       "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf"
         "%6d %6d %5d %5d %5d "
         "%10"PRId64" %10.3lf"
         "%10lf %8u %10"PRId64" %10d %10d\n",
@@ -2818,6 +2819,9 @@
         cpi->rc.projected_frame_size / cpi->common.MBs,
         (cpi->rc.projected_frame_size - cpi->rc.this_frame_target),
         cpi->rc.vbr_bits_off_target,
+        cpi->rc.vbr_bits_off_target_fast,
+        cpi->twopass.extend_minq,
+        cpi->twopass.extend_minq_fast,
         cpi->rc.total_target_vs_actual,
         (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
         cpi->rc.total_actual_bits, cm->base_qindex,
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 942eac9..856a665 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -1248,8 +1248,9 @@
     twopass->modified_error_left = modified_error_total;
   }
 
-  // Reset the vbr bits off target counter
+  // Reset the vbr bits off target counters
   cpi->rc.vbr_bits_off_target = 0;
+  cpi->rc.vbr_bits_off_target_fast = 0;
 
   cpi->rc.rate_error_estimate = 0;
 
@@ -2650,6 +2651,7 @@
 
 #define MINQ_ADJ_LIMIT 48
 #define MINQ_ADJ_LIMIT_CQ 20
+#define HIGH_UNDERSHOOT_RATIO 2
 void vp9_twopass_postencode_update(VP9_COMP *cpi) {
   TWO_PASS *const twopass = &cpi->twopass;
   RATE_CONTROL *const rc = &cpi->rc;
@@ -2716,5 +2718,32 @@
 
     twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit);
     twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit);
+
+    // If there is a big and unexpected undershoot then feed the extra
+    // bits back in quickly. One situation where this may happen is if a
+    // frame is unexpectedly almost perfectly predicted by the ARF or GF
+    // but not very well predicted by the previous frame.
+    if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) {
+      int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
+      if (rc->projected_frame_size < fast_extra_thresh) {
+        rc->vbr_bits_off_target_fast +=
+          fast_extra_thresh - rc->projected_frame_size;
+        rc->vbr_bits_off_target_fast =
+          MIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+
+        // Fast adaptation of minQ if necessary to use up the extra bits.
+        if (rc->avg_frame_bandwidth) {
+          twopass->extend_minq_fast =
+            (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
+        }
+        twopass->extend_minq_fast = MIN(twopass->extend_minq_fast,
+                                        minq_adj_limit - twopass->extend_minq);
+      } else if (rc->vbr_bits_off_target_fast) {
+        twopass->extend_minq_fast = MIN(twopass->extend_minq_fast,
+                                        minq_adj_limit - twopass->extend_minq);
+      } else {
+        twopass->extend_minq_fast = 0;
+      }
+    }
   }
 }
diff --git a/vp9/encoder/vp9_firstpass.h b/vp9/encoder/vp9_firstpass.h
index 08e7a8b..4a03855 100644
--- a/vp9/encoder/vp9_firstpass.h
+++ b/vp9/encoder/vp9_firstpass.h
@@ -122,6 +122,7 @@
   int baseline_active_worst_quality;
   int extend_minq;
   int extend_maxq;
+  int extend_minq_fast;
 
   GF_GROUP gf_group;
 } TWO_PASS;
@@ -135,6 +136,7 @@
 
 void vp9_init_second_pass(struct VP9_COMP *cpi);
 void vp9_rc_get_second_pass_params(struct VP9_COMP *cpi);
+void vp9_twopass_postencode_update(struct VP9_COMP *cpi);
 
 // Post encode update of the rate control parameters for 2-pass
 void vp9_twopass_postencode_update(struct VP9_COMP *cpi);
diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c
index 7211e99..98095ac 100644
--- a/vp9/encoder/vp9_ratectrl.c
+++ b/vp9/encoder/vp9_ratectrl.c
@@ -1061,10 +1061,12 @@
     if (frame_is_intra_only(cm) ||
         (!rc->is_src_frame_alt_ref &&
          (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
-      active_best_quality -= cpi->twopass.extend_minq;
+      active_best_quality -=
+        (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
       active_worst_quality += (cpi->twopass.extend_maxq / 2);
     } else {
-      active_best_quality -= cpi->twopass.extend_minq / 2;
+      active_best_quality -=
+        (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
       active_worst_quality += cpi->twopass.extend_maxq;
     }
   }
@@ -1671,9 +1673,9 @@
 
 #define VBR_PCT_ADJUSTMENT_LIMIT 50
 // For VBR...adjustment to the frame target based on error from previous frames
-static void vbr_rate_correction(VP9_COMP *cpi,
-                                int *this_frame_target,
-                                int64_t vbr_bits_off_target) {
+static void vbr_rate_correction(VP9_COMP *cpi, int *this_frame_target) {
+  RATE_CONTROL *const rc = &cpi->rc;
+  int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
   int max_delta;
   double position_factor = 1.0;
 
@@ -1697,6 +1699,20 @@
       (vbr_bits_off_target < -max_delta) ? max_delta
                                          : (int)-vbr_bits_off_target;
   }
+
+  // Fast redistribution of bits arising from massive local undershoot.
+  // Don't do it for kf, arf, gf or overlay frames.
+  if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref &&
+      rc->vbr_bits_off_target_fast) {
+    int one_frame_bits = MAX(rc->avg_frame_bandwidth, *this_frame_target);
+    int fast_extra_bits;
+    fast_extra_bits =
+      (int)MIN(rc->vbr_bits_off_target_fast, one_frame_bits);
+    fast_extra_bits = (int)MIN(fast_extra_bits,
+      MAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8));
+    *this_frame_target += (int)fast_extra_bits;
+    rc->vbr_bits_off_target_fast -= fast_extra_bits;
+  }
 }
 
 void vp9_set_target_rate(VP9_COMP *cpi) {
@@ -1705,6 +1721,6 @@
 
   // Correction to rate target based on prior over or under shoot.
   if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
-    vbr_rate_correction(cpi, &target_rate, rc->vbr_bits_off_target);
+    vbr_rate_correction(cpi, &target_rate);
   vp9_rc_set_frame_target(cpi, target_rate);
 }
diff --git a/vp9/encoder/vp9_ratectrl.h b/vp9/encoder/vp9_ratectrl.h
index a2e0699..e12d200 100644
--- a/vp9/encoder/vp9_ratectrl.h
+++ b/vp9/encoder/vp9_ratectrl.h
@@ -100,6 +100,7 @@
   int64_t buffer_level;
   int64_t bits_off_target;
   int64_t vbr_bits_off_target;
+  int64_t vbr_bits_off_target_fast;
 
   int decimation_factor;
   int decimation_count;