Merge "Rename so -> scan_order in vp9_encodemb.c"
diff --git a/test/borders_test.cc b/test/borders_test.cc
index dcdedcf..5071541 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -67,7 +67,7 @@
cfg_.g_lag_in_frames = 25;
cfg_.rc_2pass_vbr_minsection_pct = 5;
- cfg_.rc_2pass_vbr_minsection_pct = 2000;
+ cfg_.rc_2pass_vbr_maxsection_pct = 2000;
cfg_.rc_target_bitrate = 200;
cfg_.rc_min_quantizer = 40;
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 56b05ce..7fc1dd2 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -348,23 +348,27 @@
static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
const TileInfo *const tile,
BLOCK_SIZE bsize, int mi_row, int mi_col) {
- const int bh = num_8x8_blocks_high_lookup[bsize];
const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
const int offset = mi_row * cm->mode_info_stride + mi_col;
const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
tile->mi_col_start;
+ int x, y;
xd->mi_8x8 = cm->mi_grid_visible + offset;
xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;
-
- // we are using the mode info context stream here
- xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
- xd->mi_8x8[0]->mbmi.sb_type = bsize;
-
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
+ xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
+ xd->mi_8x8[0]->mbmi.sb_type = bsize;
+ for (y = 0; y < y_mis; ++y)
+ for (x = !y; x < x_mis; ++x)
+ xd->mi_8x8[y * cm->mode_info_stride + x] = xd->mi_8x8[0];
+
set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);
// Distance of Mb to the various image edges. These are specified to 8th pel
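The loop added to set_offsets() above takes over the per-block fill that vp9_read_mode_info() used to do: every 8x8 mi-grid cell covered by the block is pointed at the block's single MODE_INFO, clamped at the frame edge, with x = !y skipping the already-assigned top-left cell. Below is a self-contained sketch of that pointer-fill pattern; the toy grid, the stand-in MODE_INFO struct, and the local MIN macro are illustrative only, not the library's types.

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    typedef struct { int sb_type; } MODE_INFO;   /* stand-in for the real struct */

    int main(void) {
      enum { MI_ROWS = 4, MI_COLS = 6, STRIDE = MI_COLS };
      MODE_INFO block = { 0 };                   /* one mode info shared by every covered cell */
      MODE_INFO *grid[MI_ROWS * STRIDE] = { 0 };

      const int mi_row = 2, mi_col = 4, bw = 4, bh = 4;  /* e.g. a 32x32 block spans 4x4 mi units */
      const int x_mis = MIN(bw, MI_COLS - mi_col);       /* clamp at the right edge  */
      const int y_mis = MIN(bh, MI_ROWS - mi_row);       /* clamp at the bottom edge */
      MODE_INFO **mi_8x8 = grid + mi_row * STRIDE + mi_col;
      int x, y;

      mi_8x8[0] = &block;                        /* top-left cell is set first, as in the patch */
      for (y = 0; y < y_mis; ++y)
        for (x = !y; x < x_mis; ++x)             /* x = !y skips the already-set [0][0] */
          mi_8x8[y * STRIDE + x] = mi_8x8[0];

      for (y = 0; y < y_mis; ++y)
        for (x = 0; x < x_mis; ++x)
          printf("cell (%d,%d) -> %p\n", mi_row + y, mi_col + x,
                 (void *)mi_8x8[y * STRIDE + x]);
      return 0;
    }
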
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 164576d..9206c00 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -162,12 +162,12 @@
static void read_intra_frame_mode_info(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
- MODE_INFO *const m,
int mi_row, int mi_col, vp9_reader *r) {
- MB_MODE_INFO *const mbmi = &m->mbmi;
- const BLOCK_SIZE bsize = mbmi->sb_type;
+ MODE_INFO *const mi = xd->mi_8x8[0];
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride];
const MODE_INFO *left_mi = xd->left_available ? xd->mi_8x8[-1] : NULL;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_row, mi_col, r);
mbmi->skip_coeff = read_skip_coeff(cm, xd, mbmi->segment_id, r);
@@ -176,8 +176,8 @@
mbmi->ref_frame[1] = NONE;
if (bsize >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
- const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
+ const MB_PREDICTION_MODE A = above_block_mode(mi, above_mi, 0);
+ const MB_PREDICTION_MODE L = left_block_mode(mi, left_mi, 0);
mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
} else {
// Only 4x4, 4x8, 8x4 blocks
@@ -188,19 +188,19 @@
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int ib = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib);
- const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, ib);
+ const MB_PREDICTION_MODE A = above_block_mode(mi, above_mi, ib);
+ const MB_PREDICTION_MODE L = left_block_mode(mi, left_mi, ib);
const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
vp9_kf_y_mode_prob[A][L]);
- m->bmi[ib].as_mode = b_mode;
+ mi->bmi[ib].as_mode = b_mode;
if (num_4x4_h == 2)
- m->bmi[ib + 2].as_mode = b_mode;
+ mi->bmi[ib + 2].as_mode = b_mode;
if (num_4x4_w == 2)
- m->bmi[ib + 1].as_mode = b_mode;
+ mi->bmi[ib + 1].as_mode = b_mode;
}
}
- mbmi->mode = m->bmi[3].as_mode;
+ mbmi->mode = mi->bmi[3].as_mode;
}
mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]);
@@ -509,8 +509,8 @@
static void read_inter_frame_mode_info(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
const TileInfo *const tile,
- MODE_INFO *const mi,
int mi_row, int mi_col, vp9_reader *r) {
+ MODE_INFO *const mi = xd->mi_8x8[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
@@ -528,25 +528,10 @@
read_intra_block_mode_info(cm, mi, r);
}
-void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
- const TileInfo *const tile,
+void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd, const TileInfo *tile,
int mi_row, int mi_col, vp9_reader *r) {
- MODE_INFO *const mi = xd->mi_8x8[0];
- const BLOCK_SIZE bsize = mi->mbmi.sb_type;
- const int bw = num_8x8_blocks_wide_lookup[bsize];
- const int bh = num_8x8_blocks_high_lookup[bsize];
- const int y_mis = MIN(bh, cm->mi_rows - mi_row);
- const int x_mis = MIN(bw, cm->mi_cols - mi_col);
- int x, y, z;
-
if (frame_is_intra_only(cm))
- read_intra_frame_mode_info(cm, xd, mi, mi_row, mi_col, r);
+ read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
else
- read_inter_frame_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
-
- for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride) {
- for (x = !y; x < x_mis; x++) {
- xd->mi_8x8[z + x] = mi;
- }
- }
+ read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
}
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 5a337b5..1356ca5 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -63,14 +63,14 @@
static void write_intra_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
const vp9_prob *probs) {
- write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+ vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
static void write_inter_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
const vp9_prob *probs) {
assert(is_inter_mode(mode));
- write_token(w, vp9_inter_mode_tree, probs,
- &inter_mode_encodings[INTER_OFFSET(mode)]);
+ vp9_write_token(w, vp9_inter_mode_tree, probs,
+ &inter_mode_encodings[INTER_OFFSET(mode)]);
}
static INLINE void write_be32(uint8_t *p, int value) {
@@ -179,12 +179,12 @@
if (t >= TWO_TOKEN && t < EOB_TOKEN) {
int len = UNCONSTRAINED_NODES - p->skip_eob_node;
int bits = v >> (n - len);
- treed_write(w, vp9_coef_tree, p->context_tree, bits, len, i);
- treed_write(w, vp9_coef_con_tree,
- vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v, n - len,
- 0);
+ vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
+ vp9_write_tree(w, vp9_coef_con_tree,
+ vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
+ v, n - len, 0);
} else {
- treed_write(w, vp9_coef_tree, p->context_tree, v, n, i);
+ vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
}
if (b->base_val) {
@@ -214,7 +214,7 @@
static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
int segment_id) {
if (seg->enabled && seg->update_map)
- treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
+ vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
@@ -332,16 +332,15 @@
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
write_inter_mode(bc, mode, mv_ref_p);
- ++cm->counts.inter_mode[mi->mode_context[rf]]
- [INTER_OFFSET(mode)];
+ ++cm->counts.inter_mode[mi->mode_context[rf]][INTER_OFFSET(mode)];
}
}
if (cm->mcomp_filter_type == SWITCHABLE) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
- write_token(bc, vp9_switchable_interp_tree,
- cm->fc.switchable_interp_prob[ctx],
- &switchable_interp_encodings[mi->interp_filter]);
+ vp9_write_token(bc, vp9_switchable_interp_tree,
+ cm->fc.switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mi->interp_filter]);
} else {
assert(mi->interp_filter == cm->mcomp_filter_type);
}
@@ -470,7 +469,7 @@
const int has_cols = (mi_col + hbs) < cm->mi_cols;
if (has_rows && has_cols) {
- write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
+ vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
} else if (!has_rows && has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
vp9_write(w, p == PARTITION_SPLIT, probs[1]);
diff --git a/vp9/encoder/vp9_boolhuff.h b/vp9/encoder/vp9_boolhuff.h
index c3f340d..a0fff38 100644
--- a/vp9/encoder/vp9_boolhuff.h
+++ b/vp9/encoder/vp9_boolhuff.h
@@ -111,5 +111,6 @@
vp9_write_bit(w, 1 & (data >> bit));
}
+#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8)
#endif // VP9_ENCODER_VP9_BOOLHUFF_H_
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 3f01c77..9af28f9 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -47,13 +47,13 @@
vp9_write(w, sign, mvcomp->sign);
// Class
- write_token(w, vp9_mv_class_tree, mvcomp->classes,
- &mv_class_encodings[mv_class]);
+ vp9_write_token(w, vp9_mv_class_tree, mvcomp->classes,
+ &mv_class_encodings[mv_class]);
// Integer bits
if (mv_class == MV_CLASS_0) {
- write_token(w, vp9_mv_class0_tree, mvcomp->class0,
- &mv_class0_encodings[d]);
+ vp9_write_token(w, vp9_mv_class0_tree, mvcomp->class0,
+ &mv_class0_encodings[d]);
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
@@ -62,9 +62,9 @@
}
// Fractional bits
- write_token(w, vp9_mv_fp_tree,
- mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
- &mv_fp_encodings[fr]);
+ vp9_write_token(w, vp9_mv_fp_tree,
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ &mv_fp_encodings[fr]);
// High precision bit
if (usehp)
@@ -209,7 +209,7 @@
const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff);
usehp = usehp && vp9_use_mv_hp(ref);
- write_token(w, vp9_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
+ vp9_write_token(w, vp9_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 5f42d0e..f6e8667 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -364,36 +364,32 @@
output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats);
}
-static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
- YV12_BUFFER_CONFIG *recon_buffer,
- int *best_motion_err, int recon_yoffset) {
+static unsigned int zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
+ YV12_BUFFER_CONFIG *recon_buffer,
+ int recon_yoffset) {
MACROBLOCKD *const xd = &x->e_mbd;
+ const uint8_t *const src = x->plane[0].src.buf;
+ const int src_stride = x->plane[0].src.stride;
+ const uint8_t *const ref = xd->plane[0].pre[0].buf
+ = recon_buffer->y_buffer + recon_yoffset;
+ const int ref_stride = xd->plane[0].pre[0].stride;
- // Set up pointers for this macro block recon buffer
- xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
-
+ unsigned int sse;
switch (xd->mi_8x8[0]->mbmi.sb_type) {
case BLOCK_8X8:
- vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
- (unsigned int *)(best_motion_err));
+ vp9_mse8x8(src, src_stride, ref, ref_stride, &sse);
break;
case BLOCK_16X8:
- vp9_mse16x8(x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
- (unsigned int *)(best_motion_err));
+ vp9_mse16x8(src, src_stride, ref, ref_stride, &sse);
break;
case BLOCK_8X16:
- vp9_mse8x16(x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
- (unsigned int *)(best_motion_err));
+ vp9_mse8x16(src, src_stride, ref, ref_stride, &sse);
break;
default:
- vp9_mse16x16(x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
- (unsigned int *)(best_motion_err));
+ vp9_mse16x16(src, src_stride, ref, ref_stride, &sse);
break;
}
+ return sse;
}
static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
@@ -583,10 +579,9 @@
int this_error;
int gf_motion_error = INT_MAX;
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
- double error_weight;
+ double error_weight = 1.0;
vp9_clear_system_state(); // __asm emms;
- error_weight = 1.0; // avoid uninitialized warnings
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset;
@@ -647,11 +642,9 @@
// Other than for the first frame do a motion search
if (cm->current_video_frame > 0) {
int tmp_err;
- int motion_error = INT_MAX;
+ int motion_error = zz_motion_search(cpi, x, lst_yv12, recon_yoffset);
int_mv mv, tmp_mv;
-
// Simple 0,0 motion with no mv overhead
- zz_motion_search(cpi, x, lst_yv12, &motion_error, recon_yoffset);
mv.as_int = tmp_mv.as_int = 0;
// Test last reference frame using the previous best mv as the
@@ -684,8 +677,7 @@
// Experimental search in an older reference frame
if (cm->current_video_frame > 1) {
// Simple 0,0 motion with no mv overhead
- zz_motion_search(cpi, x, gld_yv12,
- &gf_motion_error, recon_yoffset);
+ gf_motion_error = zz_motion_search(cpi, x, gld_yv12, recon_yoffset);
first_pass_motion_search(cpi, x, &zero_ref_mv,
&tmp_mv.as_mv, gld_yv12,
@@ -724,11 +716,9 @@
// very close and very low. This helps with scene cut
// detection for example in cropped clips with black bars
// at the sides or top and bottom.
- if ((((this_error - intrapenalty) * 9) <=
- (motion_error * 10)) &&
- (this_error < (2 * intrapenalty))) {
+ if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
+ this_error < 2 * intrapenalty)
neutral_count++;
- }
mv.as_mv.row *= 8;
mv.as_mv.col *= 8;
@@ -737,8 +727,7 @@
xd->mi_8x8[0]->mbmi.tx_size = TX_4X4;
xd->mi_8x8[0]->mbmi.ref_frame[0] = LAST_FRAME;
xd->mi_8x8[0]->mbmi.ref_frame[1] = NONE;
- vp9_build_inter_predictors_sby(xd, mb_row << 1,
- mb_col << 1,
+ vp9_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1,
xd->mi_8x8[0]->mbmi.sb_type);
vp9_encode_sby(x, xd->mi_8x8[0]->mbmi.sb_type);
sum_mvr += mv.as_mv.row;
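The sse value that the refactored zz_motion_search() now returns is the sum of squared differences over the block, which is what the vp9_mseNxM() kernels store in their sse output. A minimal reference-style sketch of that quantity follows; it is a plain C loop for illustration, not the library's optimized code.

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int block_sse(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  int width, int height) {
      unsigned int sse = 0;
      int r, c;
      for (r = 0; r < height; ++r)
        for (c = 0; c < width; ++c) {
          const int diff = src[r * src_stride + c] - ref[r * ref_stride + c];
          sse += (unsigned int)(diff * diff);
        }
      return sse;
    }

    int main(void) {
      uint8_t src[16 * 16], ref[16 * 16];
      int i;
      for (i = 0; i < 16 * 16; ++i) {
        src[i] = 100;
        ref[i] = 102;              /* every pixel off by 2 -> sse = 256 * 4 = 1024 */
      }
      printf("16x16 sse = %u\n", block_sse(src, 16, ref, 16, 16, 16));
      return 0;
    }
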
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index cdba1e8..b813f06 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -3811,8 +3811,6 @@
int skip_uv[TX_SIZES];
MB_PREDICTION_MODE mode_uv[TX_SIZES] = { 0 };
struct scale_factors scale_factor[4];
- unsigned int ref_frame_mask = 0;
- unsigned int mode_mask = 0;
int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
cpi->common.y_dc_delta_q);
int_mv seg_mvs[4][MAX_REF_FRAMES];
@@ -3842,15 +3840,6 @@
*returnrate = INT_MAX;
- // Create a mask set to 1 for each reference frame used by a smaller
- // resolution.
- if (cpi->sf.use_avoid_tested_higherror) {
- ref_frame_mask = 0;
- mode_mask = 0;
- ref_frame_mask = ~ref_frame_mask;
- mode_mask = ~mode_mask;
- }
-
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, tile, idx_list[ref_frame], ref_frame,
diff --git a/vp9/encoder/vp9_treewriter.h b/vp9/encoder/vp9_treewriter.h
index a2f9df1..703272c 100644
--- a/vp9/encoder/vp9_treewriter.h
+++ b/vp9/encoder/vp9_treewriter.h
@@ -8,19 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP9_ENCODER_VP9_TREEWRITER_H_
#define VP9_ENCODER_VP9_TREEWRITER_H_
-/* Trees map alphabets into huffman-like codes suitable for an arithmetic
- bit coder. Timothy S Murphy 11 October 2004 */
-
#include "vp9/common/vp9_treecoder.h"
-
#include "vp9/encoder/vp9_boolhuff.h" /* for now */
-#define vp9_write_prob(w, v) vp9_write_literal((w), (v), 8)
-
#define vp9_cost_zero(prob) (vp9_prob_cost[prob])
#define vp9_cost_one(prob) vp9_cost_zero(vp9_complement(prob))
@@ -33,31 +26,6 @@
return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p);
}
-static INLINE void treed_write(vp9_writer *w,
- vp9_tree tree, const vp9_prob *probs,
- int bits, int len,
- vp9_tree_index i) {
- do {
- const int bit = (bits >> --len) & 1;
- vp9_write(w, bit, probs[i >> 1]);
- i = tree[i + bit];
- } while (len);
-}
-
-struct vp9_token {
- int value;
- int len;
-};
-
-
-void vp9_tokens_from_tree(struct vp9_token*, const vp9_tree_index *);
-
-static INLINE void write_token(vp9_writer *w, vp9_tree tree,
- const vp9_prob *probs,
- const struct vp9_token *token) {
- treed_write(w, tree, probs, token->value, token->len, 0);
-}
-
static INLINE int treed_cost(vp9_tree tree, const vp9_prob *probs,
int bits, int len) {
int cost = 0;
@@ -79,4 +47,27 @@
unsigned int branch_ct[ /* n - 1 */ ][2],
const unsigned int num_events[ /* n */ ]);
+struct vp9_token {
+ int value;
+ int len;
+};
+
+void vp9_tokens_from_tree(struct vp9_token*, const vp9_tree_index *);
+
+static INLINE void vp9_write_tree(vp9_writer *w, const vp9_tree_index *tree,
+ const vp9_prob *probs, int bits, int len,
+ vp9_tree_index i) {
+ do {
+ const int bit = (bits >> --len) & 1;
+ vp9_write(w, bit, probs[i >> 1]);
+ i = tree[i + bit];
+ } while (len);
+}
+
+static INLINE void vp9_write_token(vp9_writer *w, const vp9_tree_index *tree,
+ const vp9_prob *probs,
+ const struct vp9_token *token) {
+ vp9_write_tree(w, tree, probs, token->value, token->len, 0);
+}
+
#endif // VP9_ENCODER_VP9_TREEWRITER_H_
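
The vp9_write_tree()/vp9_write_token() pair moved into this header encodes a token by walking its (value, len) bit path down the tree, coding each bit against probs[i >> 1] and following tree[i + bit]. Below is a standalone trace of that walk with the vp9_write() call replaced by a printf, a made-up 4-symbol tree in the same leaf-is-negative layout, and local typedefs standing in for the real headers; it is a sketch of the traversal, not the arithmetic coder itself.

    #include <stdio.h>

    typedef signed char vp9_tree_index;
    typedef unsigned char vp9_prob;

    static const vp9_tree_index demo_tree[6] = {
      -0, 2,        /* node 0: left -> symbol 0, right -> node 2 */
      -1, 4,        /* node 2: left -> symbol 1, right -> node 4 */
      -2, -3        /* node 4: left -> symbol 2, right -> symbol 3 */
    };
    static const vp9_prob demo_probs[3] = { 200, 140, 128 };

    static void trace_write_tree(const vp9_tree_index *tree, const vp9_prob *probs,
                                 int bits, int len, vp9_tree_index i) {
      do {
        const int bit = (bits >> --len) & 1;
        printf("  bit %d coded with prob %d\n", bit, probs[i >> 1]);
        i = tree[i + bit];
      } while (len);
    }

    int main(void) {
      /* Token for symbol 2: path right, right, left -> value 0b110, length 3,
       * which is what vp9_tokens_from_tree() would produce for this tree. */
      const int value = 6, len = 3;
      printf("writing symbol 2:\n");
      trace_write_tree(demo_tree, demo_probs, value, len, 0);
      return 0;
    }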