Merge "Refine motion vector prediction for NEWMV mode"
diff --git a/build/make/configure.sh b/build/make/configure.sh
index 75279b9..f8329aa 100755
--- a/build/make/configure.sh
+++ b/build/make/configure.sh
@@ -979,8 +979,10 @@
fi
# append any user defined extra cflags
- check_add_cflags ${extra_cflags} || \
- die "Requested extra CFLAGS '${extra_cflags}' not supported by compiler"
+ if [ -n "${extra_cflags}" ] ; then
+ check_add_cflags ${extra_cflags} || \
+ die "Requested extra CFLAGS '${extra_cflags}' not supported by compiler"
+ fi
}
process_toolchain() {
diff --git a/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm b/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
index 57cd318..42dae13 100644
--- a/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
+++ b/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
@@ -65,6 +65,8 @@
numparts_loop
ldr r10, [sp, #40] ; ptr
ldr r5, [sp, #36] ; move mb_rows to the counting section
+ sub r5, r5, r11 ; move start point with each partition
+ ; mb_rows starts at i
str r5, [sp, #12]
; Reset all of the VP8 Writer data for each partition that
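In C terms, partition i packs macroblock rows i, i + num_parts, i + 2*num_parts, and so on, so its row counter has to start at an offset of i. A rough sketch of the walk this implements (helper name hypothetical; the comment above suggests r11 holds the partition index):

    /* partition i covers every num_parts-th row, starting at row i,
     * so the down-counter is initialized to mb_rows - i */
    for (mb_row = i; mb_row < mb_rows; mb_row += num_parts)
        pack_mb_row_tokens(mb_row);   /* hypothetical helper */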
diff --git a/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm b/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm
index 1b09cfe..1475f76 100644
--- a/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm
+++ b/vp8/encoder/arm/neon/vp8_subpixelvariance16x16_neon.asm
@@ -9,7 +9,7 @@
;
- EXPORT |vp8_sub_pixel_variance16x16_neon|
+ EXPORT |vp8_sub_pixel_variance16x16_neon_func|
ARM
REQUIRE8
PRESERVE8
@@ -24,7 +24,7 @@
; stack(r6) unsigned int *sse
;note: most of the code is copied from bilinear_predict16x16_neon and vp8_variance16x16_neon.
-|vp8_sub_pixel_variance16x16_neon| PROC
+|vp8_sub_pixel_variance16x16_neon_func| PROC
push {r4-r6, lr}
ldr r12, _BilinearTaps_coeff_
diff --git a/vp8/encoder/arm/variance_arm.c b/vp8/encoder/arm/variance_arm.c
new file mode 100644
index 0000000..4c72485
--- /dev/null
+++ b/vp8/encoder/arm/variance_arm.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vpx_config.h"
+
+#if HAVE_ARMV7
+
+unsigned int vp8_sub_pixel_variance16x16_neon
+(
+ const unsigned char *src_ptr,
+ int src_pixels_per_line,
+ int xoffset,
+ int yoffset,
+ const unsigned char *dst_ptr,
+ int dst_pixels_per_line,
+ unsigned int *sse
+)
+{
+ if (xoffset == 4 && yoffset == 0)
+ return vp8_variance_halfpixvar16x16_h_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+ else if (xoffset == 0 && yoffset == 4)
+ return vp8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+ else if (xoffset == 4 && yoffset == 4)
+ return vp8_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
+ else
+ return vp8_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
+}
+
+#endif
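The xoffset and yoffset arguments index the eight bilinear filter phases, so 4 is the exact half-pixel position, and only the three half-pixel combinations have dedicated NEON kernels. A minimal caller sketch (buffers, strides, and offsets hypothetical):

    unsigned int sse;

    /* (4, 0): horizontal half-pel -> vp8_variance_halfpixvar16x16_h_neon */
    vp8_sub_pixel_variance16x16_neon(src, 16, 4, 0, dst, 16, &sse);

    /* (3, 5): arbitrary sub-pel -> vp8_sub_pixel_variance16x16_neon_func */
    vp8_sub_pixel_variance16x16_neon(src, 16, 3, 5, dst, 16, &sse);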
diff --git a/vp8/encoder/arm/variance_arm.h b/vp8/encoder/arm/variance_arm.h
index 0e5f62f..3cbacfa 100644
--- a/vp8/encoder/arm/variance_arm.h
+++ b/vp8/encoder/arm/variance_arm.h
@@ -30,6 +30,7 @@
//extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
//extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
+extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index bf94e50..3ad40ef 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -112,6 +112,7 @@
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [vp8_coef_tokens];
int optimize;
+ int q_index;
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 4c95f28..1689b43 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -365,6 +365,33 @@
x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zbin_extra = (short)zbin_extra;
+
+ /* save this macroblock QIndex for vp8_update_zbin_extra() */
+ x->q_index = QIndex;
+}
+
+void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ int i;
+ int QIndex = x->q_index;
+ int zbin_extra;
+
+ // Y
+ zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ for (i = 0; i < 16; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // UV
+ zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
+ for (i = 16; i < 24; i++)
+ {
+ x->block[i].zbin_extra = (short)zbin_extra;
+ }
+
+ // Y2
+ zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
+ x->block[24].zbin_extra = (short)zbin_extra;
}
void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
@@ -1261,10 +1288,17 @@
if (cpi->sf.RD)
{
+ int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
+ {
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb);
+ /* the fast quantizer does not use zbin_extra, so
+ * do not recalculate */
+ cpi->zbin_mode_boost_enabled = 0;
+ }
inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error);
/* switch back to the regular quantizer for the encode */
@@ -1273,6 +1307,9 @@
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb);
}
+ /* restore cpi->zbin_mode_boost_enabled */
+ cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
+
}
else
#endif
@@ -1289,7 +1326,7 @@
#endif
// MB level adjustment to quantizer setup
- if (xd->segmentation_enabled || cpi->zbin_mode_boost_enabled)
+ if (xd->segmentation_enabled)
{
// If cyclic update enabled
if (cpi->cyclic_refresh_mode_enabled)
@@ -1299,9 +1336,14 @@
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
xd->mode_info_context->mbmi.segment_id = 0;
+
+ /* segment_id changed, so update */
+ vp8cx_mb_init_quantizer(cpi, x);
}
}
+ }
+ {
// Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled)
{
@@ -1325,7 +1367,7 @@
else
cpi->zbin_mode_boost = 0;
- vp8cx_mb_init_quantizer(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
}
cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
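Only the zbin_extra terms depend on zbin_over_quant and zbin_mode_boost, so refreshing those 25 values is enough here; the full vp8cx_mb_init_quantizer() rebuild is reserved for actual segment/QIndex changes. A worked sketch with made-up numbers:

    /* hypothetical: Y1dequant[QIndex][1] = 124, zbin_over_quant = 8,
     * zbin_mode_boost = 16 */
    short zbin_extra = (124 * (8 + 16)) >> 7;   /* = 23, shared by blocks 0..15 */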
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 0f327ce..9163b42 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -144,51 +144,6 @@
}
}
-void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
-{
- int b;
-
- vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
-
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, x->src.y_buffer, x->e_mbd.predictor, x->src.y_stride);
-
- vp8_transform_intra_mby(x);
-
- vp8_quantize_mby(x);
-
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
-
- // make sure block modes are set the way we want them for context updates
- for (b = 0; b < 16; b++)
- {
- BLOCKD *d = &x->e_mbd.block[b];
-
- switch (x->e_mbd.mode_info_context->mbmi.mode)
- {
-
- case DC_PRED:
- d->bmi.mode = B_DC_PRED;
- break;
- case V_PRED:
- d->bmi.mode = B_VE_PRED;
- break;
- case H_PRED:
- d->bmi.mode = B_HE_PRED;
- break;
- case TM_PRED:
- d->bmi.mode = B_TM_PRED;
- break;
- default:
- d->bmi.mode = B_DC_PRED;
- break;
-
- }
- }
-}
-
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
vp8_build_intra_predictors_mbuv(&x->e_mbd);
diff --git a/vp8/encoder/encodeintra.h b/vp8/encoder/encodeintra.h
index 5be23d1..c0247b0 100644
--- a/vp8/encoder/encodeintra.h
+++ b/vp8/encoder/encodeintra.h
@@ -19,7 +19,6 @@
void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
void vp8_update_mode_context(int *abmode, int *lbmode, int i, int best_mode);
void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *, MACROBLOCK *x, BLOCK *be, BLOCKD *b, int best_mode);
-void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
void vp8_encode_intra16x16mbuvrd(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
#endif
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index a77ced7..06e26be 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -2423,12 +2423,35 @@
if (cpi->oxcf.auto_key
&& cpi->frames_to_key > (int)cpi->key_frame_frequency )
{
+ FIRSTPASS_STATS *current_pos = cpi->stats_in;
+ FIRSTPASS_STATS tmp_frame;
+
cpi->frames_to_key /= 2;
- // Estimate corrected kf group error
- kf_group_err /= 2.0;
- kf_group_intra_err /= 2.0;
- kf_group_coded_err /= 2.0;
+ // Copy first frame details
+ vpx_memcpy(&tmp_frame, &first_frame, sizeof(first_frame));
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, start_position);
+
+ kf_group_err = 0;
+ kf_group_intra_err = 0;
+ kf_group_coded_err = 0;
+
+ // Rescan to get the correct error data for the forced kf group
+ for (i = 0; i < cpi->frames_to_key; i++)
+ {
+ // Accumulate kf group errors
+ kf_group_err += calculate_modified_err(cpi, &tmp_frame);
+ kf_group_intra_err += tmp_frame.intra_error;
+ kf_group_coded_err += tmp_frame.coded_error;
+
+ // Load the next frame's stats
+ vp8_input_stats(cpi, &tmp_frame);
+ }
+
+ // Reset to the start of the group
+ reset_fpf_position(cpi, current_pos);
cpi->next_key_frame_forced = TRUE;
}
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 5aa2224..93d2501 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -3127,6 +3127,10 @@
cm->uvdc_delta_q = 0;
cm->uvac_delta_q = 0;
+ if (Q < 4)
+ {
+ cm->y2dc_delta_q = 4 - Q;
+ }
// Set Segment specific quantizers
mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
@@ -4410,7 +4414,7 @@
}
}
-
// Update the GF useage maps.
// This is done after completing the compression of a frame when all modes etc. are finalized but before loop filter
vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
@@ -4615,7 +4619,8 @@
}
// Update the buffer level variable.
- if (cpi->common.refresh_alt_ref_frame)
+ // Non-viewable frames are a special case and are treated as pure overhead.
+ if ( !cm->show_frame )
cpi->bits_off_target -= cpi->projected_frame_size;
else
cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size;
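The effect on the buffer model, sketched with made-up numbers and a 4000-bit per-frame allocation:

    int bits_off_target = 0;
    /* shown frame of 3500 bits earns its per-frame allocation */
    bits_off_target += 4000 - 3500;   /* +500 */
    /* invisible alt-ref of 2000 bits is pure overhead: no allocation */
    bits_off_target -= 2000;          /* net -1500 */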
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index be9f26c..4a2329f 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -129,9 +129,6 @@
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
- //if ( i == 0 )
- // zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value/2;
- //else
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
zbin_boost_ptr ++;
@@ -144,13 +141,13 @@
y = (((x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
+ qcoeff_ptr[rc] = x; // write to destination
+ dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
if (y)
{
eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
+ zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
}
}
}
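The surviving quantize expression is Q16 fixed point: quant_ptr holds a 16.16 multiplier whose high half is added back to x before the per-coefficient shift. A worked sketch with made-up values:

    int x = 1000, quant = 20000, quant_shift = 1;   /* hypothetical */
    int y = (((x * quant) >> 16) + x) >> quant_shift;
    /* (305 + 1000) >> 1 = 652 */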
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index f04a746..d66acbc 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -43,7 +43,9 @@
#endif
-void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
+extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
+
#define RDCOST(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) )
@@ -241,10 +243,9 @@
cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) >> 4;
}
- if (cpi->RDMULT < 125)
- cpi->RDMULT = 125;
-
cpi->mb.errorperbit = (cpi->RDMULT / 100);
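+ /* an errorperbit of 0 would drop the rate term from the motion search, so force a minimum of 1 */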
+ cpi->mb.errorperbit += (cpi->mb.errorperbit==0);
+
vp8_set_speed_features(cpi);
if (cpi->common.simpler_lpf)
@@ -537,6 +538,51 @@
return cost;
}
+static void macro_block_yrd( MACROBLOCK *mb,
+ int *Rate,
+ int *Distortion,
+ const vp8_encodemb_rtcd_vtable_t *rtcd)
+{
+ int b;
+ MACROBLOCKD *const x = &mb->e_mbd;
+ BLOCK *const mb_y2 = mb->block + 24;
+ BLOCKD *const x_y2 = x->block + 24;
+ short *Y2DCPtr = mb_y2->src_diff;
+ BLOCK *beptr;
+ int d;
+
+ ENCODEMB_INVOKE(rtcd, submby)( mb->src_diff, mb->src.y_buffer,
+ mb->e_mbd.predictor, mb->src.y_stride );
+
+ // Fdct and building the 2nd order block
+ for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
+ {
+ mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
+ *Y2DCPtr++ = beptr->coeff[0];
+ *Y2DCPtr++ = beptr->coeff[16];
+ }
+
+ // 2nd order fdct
+ mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
+
+ // Quantization
+ for (b = 0; b < 16; b++)
+ {
+ mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
+ }
+
+ // DC prediction and Quantization of 2nd Order block
+ mb->quantize_b(mb_y2, x_y2);
+
+ // Distortion
+ d = ENCODEMB_INVOKE(rtcd, mberr)(mb, 1) << 2;
+ d += ENCODEMB_INVOKE(rtcd, berr)(mb_y2->coeff, x_y2->dqcoeff);
+
+ *Distortion = (d >> 4);
+
+ // rate
+ *Rate = vp8_rdcost_mby(mb);
+}
static void rd_pick_intra4x4block(
VP8_COMP *cpi,
@@ -653,33 +699,35 @@
return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
-int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi, MACROBLOCK *x, int *Rate, int *rate_y, int *Distortion)
+int vp8_rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
+ MACROBLOCK *x,
+ int *Rate,
+ int *rate_y,
+ int *Distortion)
{
-
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
int rate, ratey;
unsigned int distortion;
int best_rd = INT_MAX;
+ int this_rd;
+ int i;
//Y Search for 16x16 intra prediction mode
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
- int this_rd;
- int dummy;
- rate = 0;
+ for (i = 0; i < 16; i++)
+ {
+ vpx_memset(&x->e_mbd.block[i].bmi, 0, sizeof(B_MODE_INFO));
+ }
x->e_mbd.mode_info_context->mbmi.mode = mode;
- rate += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
+ vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
- vp8_encode_intra16x16mbyrd(IF_RTCD(&cpi->rtcd), x);
-
- ratey = vp8_rdcost_mby(x);
-
- rate += ratey;
-
- VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer, x->src.y_stride, x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride, &distortion, &dummy);
+ macro_block_yrd(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd.encodemb));
+ rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
+ [x->e_mbd.mode_info_context->mbmi.mode];
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
@@ -697,7 +745,6 @@
return best_rd;
}
-
static int rd_cost_mbuv(MACROBLOCK *mb)
{
int b;
@@ -938,48 +985,6 @@
return distortion;
}
-static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion, const vp8_encodemb_rtcd_vtable_t *rtcd)
-{
- int b;
- MACROBLOCKD *const x = &mb->e_mbd;
- BLOCK *const mb_y2 = mb->block + 24;
- BLOCKD *const x_y2 = x->block + 24;
- short *Y2DCPtr = mb_y2->src_diff;
- BLOCK *beptr;
- int d;
-
- ENCODEMB_INVOKE(rtcd, submby)(mb->src_diff, mb->src.y_buffer, mb->e_mbd.predictor, mb->src.y_stride);
-
- // Fdct and building the 2nd order block
- for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
- {
- mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
- *Y2DCPtr++ = beptr->coeff[0];
- *Y2DCPtr++ = beptr->coeff[16];
- }
-
- // 2nd order fdct
- mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
-
- // Quantization
- for (b = 0; b < 16; b++)
- {
- mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
- }
-
- // DC predication and Quantization of 2nd Order block
- mb->quantize_b(mb_y2, x_y2);
-
- // Distortion
- d = ENCODEMB_INVOKE(rtcd, mberr)(mb, 1) << 2;
- d += ENCODEMB_INVOKE(rtcd, berr)(mb_y2->coeff, x_y2->dqcoeff);
-
- *Distortion = (d >> 4);
-
- // rate
- *Rate = vp8_rdcost_mby(mb);
-}
-
unsigned char vp8_mbsplit_offset2[4][16] = {
{ 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
@@ -1990,7 +1995,7 @@
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
- vp8cx_mb_init_quantizer(cpi, x);
+ vp8_update_zbin_extra(cpi, x);
}
switch (this_mode)
diff --git a/vp8/encoder/x86/quantize_sse2.asm b/vp8/encoder/x86/quantize_sse2.asm
index 57bf3c9..45e1a2a 100644
--- a/vp8/encoder/x86/quantize_sse2.asm
+++ b/vp8/encoder/x86/quantize_sse2.asm
@@ -11,220 +11,169 @@
%include "vpx_ports/x86_abi_support.asm"
-;int vp8_regular_quantize_b_impl_sse2(short *coeff_ptr, short *zbin_ptr,
-; short *qcoeff_ptr,short *dequant_ptr,
-; const int *default_zig_zag, short *round_ptr,
-; short *quant_ptr, short *dqcoeff_ptr,
+;int vp8_regular_quantize_b_impl_sse2(
+; short *coeff_ptr,
+; short *zbin_ptr,
+; short *qcoeff_ptr,
+; short *dequant_ptr,
+; const int *default_zig_zag,
+; short *round_ptr,
+; short *quant_ptr,
+; short *dqcoeff_ptr,
; unsigned short zbin_oq_value,
-; short *zbin_boost_ptr);
+; short *zbin_boost_ptr,
+; short *quant_shift);
;
global sym(vp8_regular_quantize_b_impl_sse2)
sym(vp8_regular_quantize_b_impl_sse2):
push rbp
mov rbp, rsp
- SHADOW_ARGS_TO_STACK 10
+ SHADOW_ARGS_TO_STACK 11
+ SAVE_XMM
push rsi
push rdi
push rbx
+ ALIGN_STACK 16, rax
+ %define abs_minus_zbin 0
+ %define temp_qcoeff 32
+ %define qcoeff 64
+ %define eob_tmp 96
+ %define stack_size 112
+ sub rsp, stack_size
; end prolog
- ALIGN_STACK 16, rax
+ mov rdx, arg(0) ; coeff_ptr
+ mov rcx, arg(1) ; zbin_ptr
+ movd xmm7, arg(8) ; zbin_oq_value
+ mov rdi, arg(5) ; round_ptr
+ mov rsi, arg(6) ; quant_ptr
- %define abs_minus_zbin_lo 0
- %define abs_minus_zbin_hi 16
- %define temp_qcoeff_lo 32
- %define temp_qcoeff_hi 48
- %define save_xmm6 64
- %define save_xmm7 80
- %define eob 96
-
- %define vp8_regularquantizeb_stack_size eob + 16
-
- sub rsp, vp8_regularquantizeb_stack_size
-
- movdqa OWORD PTR[rsp + save_xmm6], xmm6
- movdqa OWORD PTR[rsp + save_xmm7], xmm7
-
- mov rdx, arg(0) ;coeff_ptr
- mov eax, arg(8) ;zbin_oq_value
-
- mov rcx, arg(1) ;zbin_ptr
- movd xmm7, eax
-
+ ; z
movdqa xmm0, OWORD PTR[rdx]
movdqa xmm4, OWORD PTR[rdx + 16]
+ pshuflw xmm7, xmm7, 0
+ punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value
+
movdqa xmm1, xmm0
movdqa xmm5, xmm4
- psraw xmm0, 15 ;sign of z (aka sz)
- psraw xmm4, 15 ;sign of z (aka sz)
+ ; sz
+ psraw xmm0, 15
+ psraw xmm4, 15
+ ; (z ^ sz)
pxor xmm1, xmm0
pxor xmm5, xmm4
- movdqa xmm2, OWORD PTR[rcx] ;load zbin_ptr
- movdqa xmm3, OWORD PTR[rcx + 16] ;load zbin_ptr
-
- pshuflw xmm7, xmm7, 0
- psubw xmm1, xmm0 ;x = abs(z)
-
- punpcklwd xmm7, xmm7 ;duplicated zbin_oq_value
- psubw xmm5, xmm4 ;x = abs(z)
-
- paddw xmm2, xmm7
- paddw xmm3, xmm7
-
- psubw xmm1, xmm2 ;sub (zbin_ptr + zbin_oq_value)
- psubw xmm5, xmm3 ;sub (zbin_ptr + zbin_oq_value)
-
- mov rdi, arg(5) ;round_ptr
- mov rsi, arg(6) ;quant_ptr
-
- movdqa OWORD PTR[rsp + abs_minus_zbin_lo], xmm1
- movdqa OWORD PTR[rsp + abs_minus_zbin_hi], xmm5
-
- paddw xmm1, xmm2 ;add (zbin_ptr + zbin_oq_value) back
- paddw xmm5, xmm3 ;add (zbin_ptr + zbin_oq_value) back
-
- movdqa xmm2, OWORD PTR[rdi]
- movdqa xmm3, OWORD PTR[rsi]
-
- movdqa xmm6, OWORD PTR[rdi + 16]
- movdqa xmm7, OWORD PTR[rsi + 16]
-
- paddw xmm1, xmm2
- paddw xmm5, xmm6
-
- pmulhw xmm1, xmm3
- pmulhw xmm5, xmm7
-
- mov rsi, arg(2) ;qcoeff_ptr
- pxor xmm6, xmm6
-
- pxor xmm1, xmm0
- pxor xmm5, xmm4
-
+ ; x = abs(z)
psubw xmm1, xmm0
psubw xmm5, xmm4
- movdqa OWORD PTR[rsp + temp_qcoeff_lo], xmm1
- movdqa OWORD PTR[rsp + temp_qcoeff_hi], xmm5
+ movdqa xmm2, OWORD PTR[rcx]
+ movdqa xmm3, OWORD PTR[rcx + 16]
- movdqa OWORD PTR[rsi], xmm6 ;zero qcoeff
- movdqa OWORD PTR[rsi + 16], xmm6 ;zero qcoeff
+ ; *zbin_ptr + zbin_oq_value
+ paddw xmm2, xmm7
+ paddw xmm3, xmm7
- xor rax, rax
- mov rcx, -1
+ ; x - (*zbin_ptr + zbin_oq_value)
+ psubw xmm1, xmm2
+ psubw xmm5, xmm3
+ movdqa OWORD PTR[rsp + abs_minus_zbin], xmm1
+ movdqa OWORD PTR[rsp + abs_minus_zbin + 16], xmm5
- mov [rsp + eob], rcx
- mov rsi, arg(9) ;zbin_boost_ptr
-
- mov rbx, arg(4) ;default_zig_zag
-
-rq_zigzag_loop:
- movsxd rcx, DWORD PTR[rbx + rax*4] ;now we have rc
- movsx edi, WORD PTR [rsi] ;*zbin_boost_ptr aka zbin
- lea rsi, [rsi + 2] ;zbin_boost_ptr++
-
- movsx edx, WORD PTR[rsp + abs_minus_zbin_lo + rcx *2]
-
- sub edx, edi ;x - zbin
- jl rq_zigzag_1
-
- mov rdi, arg(2) ;qcoeff_ptr
-
- movsx edx, WORD PTR[rsp + temp_qcoeff_lo + rcx *2]
-
- cmp edx, 0
- je rq_zigzag_1
-
- mov WORD PTR[rdi + rcx * 2], dx ;qcoeff_ptr[rc] = temp_qcoeff[rc]
-
- mov rsi, arg(9) ;zbin_boost_ptr
- mov [rsp + eob], rax ;eob = i
-
-rq_zigzag_1:
- movsxd rcx, DWORD PTR[rbx + rax*4 + 4]
- movsx edi, WORD PTR [rsi] ;*zbin_boost_ptr aka zbin
- lea rsi, [rsi + 2] ;zbin_boost_ptr++
-
- movsx edx, WORD PTR[rsp + abs_minus_zbin_lo + rcx *2]
- lea rax, [rax + 1]
-
- sub edx, edi ;x - zbin
- jl rq_zigzag_1a
-
- mov rdi, arg(2) ;qcoeff_ptr
-
- movsx edx, WORD PTR[rsp + temp_qcoeff_lo + rcx *2]
-
- cmp edx, 0
- je rq_zigzag_1a
-
- mov WORD PTR[rdi + rcx * 2], dx ;qcoeff_ptr[rc] = temp_qcoeff[rc]
-
- mov rsi, arg(9) ;zbin_boost_ptr
- mov [rsp + eob], rax ;eob = i
-
-rq_zigzag_1a:
- movsxd rcx, DWORD PTR[rbx + rax*4 + 4]
- movsx edi, WORD PTR [rsi] ;*zbin_boost_ptr aka zbin
- lea rsi, [rsi + 2] ;zbin_boost_ptr++
-
- movsx edx, WORD PTR[rsp + abs_minus_zbin_lo + rcx *2]
- lea rax, [rax + 1]
-
- sub edx, edi ;x - zbin
- jl rq_zigzag_1b
-
- mov rdi, arg(2) ;qcoeff_ptr
-
- movsx edx, WORD PTR[rsp + temp_qcoeff_lo + rcx *2]
-
- cmp edx, 0
- je rq_zigzag_1b
-
- mov WORD PTR[rdi + rcx * 2], dx ;qcoeff_ptr[rc] = temp_qcoeff[rc]
-
- mov rsi, arg(9) ;zbin_boost_ptr
- mov [rsp + eob], rax ;eob = i
-
-rq_zigzag_1b:
- movsxd rcx, DWORD PTR[rbx + rax*4 + 4]
- movsx edi, WORD PTR [rsi] ;*zbin_boost_ptr aka zbin
- lea rsi, [rsi + 2] ;zbin_boost_ptr++
-
- movsx edx, WORD PTR[rsp + abs_minus_zbin_lo + rcx *2]
- lea rax, [rax + 1]
-
- sub edx, edi ;x - zbin
- jl rq_zigzag_1c
-
- mov rdi, arg(2) ;qcoeff_ptr
-
- movsx edx, WORD PTR[rsp + temp_qcoeff_lo + rcx *2]
-
- cmp edx, 0
- je rq_zigzag_1c
-
- mov WORD PTR[rdi + rcx * 2], dx ;qcoeff_ptr[rc] = temp_qcoeff[rc]
-
- mov rsi, arg(9) ;zbin_boost_ptr
- mov [rsp + eob], rax ;eob = i
-
-rq_zigzag_1c:
- lea rax, [rax + 1]
-
- cmp rax, 16
- jl rq_zigzag_loop
-
- mov rdi, arg(2) ;qcoeff_ptr
- mov rcx, arg(3) ;dequant_ptr
- mov rsi, arg(7) ;dqcoeff_ptr
+ ; add (zbin_ptr + zbin_oq_value) back
+ paddw xmm1, xmm2
+ paddw xmm5, xmm3
movdqa xmm2, OWORD PTR[rdi]
- movdqa xmm3, OWORD PTR[rdi + 16]
+ movdqa xmm6, OWORD PTR[rdi + 16]
+
+ movdqa xmm3, OWORD PTR[rsi]
+ movdqa xmm7, OWORD PTR[rsi + 16]
+
+ ; x + round
+ paddw xmm1, xmm2
+ paddw xmm5, xmm6
+
+ ; y = x * quant_ptr >> 16
+ pmulhw xmm3, xmm1
+ pmulhw xmm7, xmm5
+
+ ; y += x
+ paddw xmm1, xmm3
+ paddw xmm5, xmm7
+
+ movdqa OWORD PTR[rsp + temp_qcoeff], xmm1
+ movdqa OWORD PTR[rsp + temp_qcoeff + 16], xmm5
+
+ pxor xmm6, xmm6
+ ; zero qcoeff
+ movdqa OWORD PTR[rsp + qcoeff], xmm6
+ movdqa OWORD PTR[rsp + qcoeff + 16], xmm6
+
+ mov [rsp + eob_tmp], DWORD -1 ; eob
+ mov rsi, arg(9) ; zbin_boost_ptr
+ mov rdi, arg(4) ; default_zig_zag
+ mov rax, arg(10) ; quant_shift_ptr
+
+%macro ZIGZAG_LOOP 2
+rq_zigzag_loop_%1:
+ movsxd rdx, DWORD PTR[rdi + (%1 * 4)] ; rc
+ movsx ebx, WORD PTR [rsi] ; *zbin_boost_ptr
+ lea rsi, [rsi + 2] ; zbin_boost_ptr++
+
+ ; x
+ movsx ecx, WORD PTR[rsp + abs_minus_zbin + rdx *2]
+
+ ; if (x >= zbin)
+ sub ecx, ebx ; x - zbin
+ jl rq_zigzag_loop_%2 ; x < zbin
+
+ movsx ebx, WORD PTR[rsp + temp_qcoeff + rdx *2]
+
+ ; downshift by quant_shift[rdx]
+ movsx ecx, WORD PTR[rax + rdx*2] ; quant_shift_ptr[rc]
+ sar ebx, cl ; also sets Z bit
+ je rq_zigzag_loop_%2 ; !y
+ mov WORD PTR[rsp + qcoeff + rdx * 2], bx ; qcoeff[rc] = y
+
+ mov rsi, arg(9) ; reset to b->zrun_zbin_boost
+ mov [rsp + eob_tmp], DWORD %1 ; eob = i
+%endmacro
+ZIGZAG_LOOP 0, 1
+ZIGZAG_LOOP 1, 2
+ZIGZAG_LOOP 2, 3
+ZIGZAG_LOOP 3, 4
+ZIGZAG_LOOP 4, 5
+ZIGZAG_LOOP 5, 6
+ZIGZAG_LOOP 6, 7
+ZIGZAG_LOOP 7, 8
+ZIGZAG_LOOP 8, 9
+ZIGZAG_LOOP 9, 10
+ZIGZAG_LOOP 10, 11
+ZIGZAG_LOOP 11, 12
+ZIGZAG_LOOP 12, 13
+ZIGZAG_LOOP 13, 14
+ZIGZAG_LOOP 14, 15
+ZIGZAG_LOOP 15, end
+rq_zigzag_loop_end:
+
+ mov rbx, arg(2) ; qcoeff_ptr
+ mov rcx, arg(3) ; dequant_ptr
+ mov rsi, arg(7) ; dqcoeff_ptr
+ mov rax, [rsp + eob_tmp] ; eob
+
+ movdqa xmm2, OWORD PTR[rsp + qcoeff]
+ movdqa xmm3, OWORD PTR[rsp + qcoeff + 16]
+
+ ; y ^ sz
+ pxor xmm2, xmm0
+ pxor xmm3, xmm4
+ ; x = (y ^ sz) - sz
+ psubw xmm2, xmm0
+ psubw xmm3, xmm4
movdqa xmm0, OWORD PTR[rcx]
movdqa xmm1, OWORD PTR[rcx + 16]
@@ -232,23 +181,20 @@
pmullw xmm0, xmm2
pmullw xmm1, xmm3
- movdqa OWORD PTR[rsi], xmm0 ;store dqcoeff
- movdqa OWORD PTR[rsi + 16], xmm1 ;store dqcoeff
-
- mov rax, [rsp + eob]
-
- movdqa xmm6, OWORD PTR[rsp + save_xmm6]
- movdqa xmm7, OWORD PTR[rsp + save_xmm7]
+ movdqa OWORD PTR[rbx], xmm2
+ movdqa OWORD PTR[rbx + 16], xmm3
+ movdqa OWORD PTR[rsi], xmm0 ; store dqcoeff
+ movdqa OWORD PTR[rsi + 16], xmm1 ; store dqcoeff
add rax, 1
- add rsp, vp8_regularquantizeb_stack_size
- pop rsp
-
; begin epilog
+ add rsp, stack_size
+ pop rsp
pop rbx
pop rdi
pop rsi
+ RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
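For reference, each unrolled ZIGZAG_LOOP step above does the same work as the scalar loop in vp8/encoder/quantize.c; a condensed C sketch (function and array names hypothetical; sign restoration happens afterwards, as in the asm):

    extern const int vp8_default_zig_zag1d[16];

    static int zigzag_sketch(const short abs_minus_zbin[16],
                             const short temp_qcoeff[16],
                             const short quant_shift[16],
                             const short *zrun_zbin_boost,
                             short qcoeff[16])
    {
        const short *zbin_boost_ptr = zrun_zbin_boost;
        int i, eob = -1;

        for (i = 0; i < 16; i++)                       /* ZIGZAG_LOOP 0..15 */
        {
            int rc = vp8_default_zig_zag1d[i];
            short boost = *zbin_boost_ptr++;           /* consumed every step */

            if (abs_minus_zbin[rc] >= boost)           /* sub ecx, ebx / jl */
            {
                short y = temp_qcoeff[rc] >> quant_shift[rc];   /* sar ebx, cl */
                if (y)
                {
                    qcoeff[rc] = y;                    /* sign applied later */
                    zbin_boost_ptr = zrun_zbin_boost;  /* reset zero-run boost */
                    eob = i;
                }
            }
        }
        return eob + 1;                                /* add rax, 1 */
    }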
diff --git a/vp8/encoder/x86/quantize_ssse3.asm b/vp8/encoder/x86/quantize_ssse3.asm
old mode 100755
new mode 100644
diff --git a/vp8/encoder/x86/quantize_x86.h b/vp8/encoder/x86/quantize_x86.h
index b5b22c0..266efb4 100644
--- a/vp8/encoder/x86/quantize_x86.h
+++ b/vp8/encoder/x86/quantize_x86.h
@@ -27,11 +27,11 @@
#if !CONFIG_RUNTIME_CPU_DETECT
-/* The sse2 quantizer has not been updated to match the new exact
- * quantizer introduced in commit e04e2935
- *#undef vp8_quantize_quantb
- *#define vp8_quantize_quantb vp8_regular_quantize_b_sse2
- */
+// Currently, this function realizes a gain on x86 and a loss on x86_64
+#if ARCH_X86
+#undef vp8_quantize_quantb
+#define vp8_quantize_quantb vp8_regular_quantize_b_sse2
+#endif
#endif
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index f9b3ea1..31438f9 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -108,37 +108,26 @@
int vp8_regular_quantize_b_impl_sse2(short *coeff_ptr, short *zbin_ptr,
- short *qcoeff_ptr,short *dequant_ptr,
- const int *default_zig_zag, short *round_ptr,
- short *quant_ptr, short *dqcoeff_ptr,
- unsigned short zbin_oq_value,
- short *zbin_boost_ptr);
+ short *qcoeff_ptr,short *dequant_ptr,
+ const int *default_zig_zag, short *round_ptr,
+ short *quant_ptr, short *dqcoeff_ptr,
+ unsigned short zbin_oq_value,
+ short *zbin_boost_ptr,
+ short *quant_shift_ptr);
void vp8_regular_quantize_b_sse2(BLOCK *b,BLOCKD *d)
{
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
-
- d->eob = vp8_regular_quantize_b_impl_sse2(
- coeff_ptr,
- zbin_ptr,
- qcoeff_ptr,
- dequant_ptr,
- vp8_default_zig_zag1d,
-
- round_ptr,
- quant_ptr,
- dqcoeff_ptr,
- zbin_oq_value,
- zbin_boost_ptr
- );
+ d->eob = vp8_regular_quantize_b_impl_sse2(b->coeff,
+ b->zbin,
+ d->qcoeff,
+ d->dequant,
+ vp8_default_zig_zag1d,
+ b->round,
+ b->quant,
+ d->dqcoeff,
+ b->zbin_extra,
+ b->zrun_zbin_boost,
+ b->quant_shift);
}
int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
@@ -307,7 +296,9 @@
cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
- /*cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;*/
+#if ARCH_X86
+ cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
+#endif
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
diff --git a/vp8/vp8cx_arm.mk b/vp8/vp8cx_arm.mk
index da27e08..6b624a7 100644
--- a/vp8/vp8cx_arm.mk
+++ b/vp8/vp8cx_arm.mk
@@ -16,6 +16,7 @@
VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/arm_csystemdependent.c
VP8_CX_SRCS-$(HAVE_ARMV7) += encoder/arm/encodemb_arm.c
+VP8_CX_SRCS-$(HAVE_ARMV7) += encoder/arm/variance_arm.c
VP8_CX_SRCS-$(HAVE_ARMV7) += encoder/arm/quantize_arm.c
VP8_CX_SRCS-$(HAVE_ARMV7) += encoder/arm/picklpf_arm.c
VP8_CX_SRCS-$(HAVE_ARMV5TE) += encoder/arm/boolhuff_arm.c
@@ -59,7 +60,7 @@
vpx_vp8_enc_asm_offsets.asm: obj_int_extract
vpx_vp8_enc_asm_offsets.asm: $(VP8_PREFIX)encoder/arm/vpx_vp8_enc_asm_offsets.c.o
./obj_int_extract rvds $< $(ADS2GAS) > $@
-OBJS-yes += $(VP8_PREFIX)encoder/arm/vpx_vp7_enc_asm_offsets.c.o
+OBJS-yes += $(VP8_PREFIX)encoder/arm/vpx_vp8_enc_asm_offsets.c.o
CLEAN-OBJS += vpx_vp8_enc_asm_offsets.asm
$(filter %$(ASM).o,$(OBJS-yes)): vpx_vp8_enc_asm_offsets.asm
endif