apply clang-format
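
A reformat like this can typically be reproduced with an invocation
along the following lines; --style=file reads the project's
.clang-format, and the exact file list shown here is an assumption
rather than something recorded in this commit:

    clang-format -i --style=file \
        vp10/encoder/*.c vp10/encoder/*.h \
        vp10/encoder/arm/neon/*.c
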
Change-Id: Ib8c9eb6263d6eba6b9d7b2e402b7e83a78c86be9
diff --git a/vp10/encoder/aq_complexity.c b/vp10/encoder/aq_complexity.c
index 2506a4e..912babc 100644
--- a/vp10/encoder/aq_complexity.c
+++ b/vp10/encoder/aq_complexity.c
@@ -19,25 +19,27 @@
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_ports/system_state.h"
-#define AQ_C_SEGMENTS 5
-#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
+#define AQ_C_SEGMENTS 5
+#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
#define AQ_C_STRENGTHS 3
-static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {1.75, 1.25, 1.05, 1.00, 0.90},
- {2.00, 1.50, 1.15, 1.00, 0.85},
- {2.50, 1.75, 1.25, 1.00, 0.80} };
-static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {0.15, 0.30, 0.55, 2.00, 100.0},
- {0.20, 0.40, 0.65, 2.00, 100.0},
- {0.25, 0.50, 0.75, 2.00, 100.0} };
-static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
- { {-4.0, -3.0, -2.0, 100.00, 100.0},
- {-3.5, -2.5, -1.5, 100.00, 100.0},
- {-3.0, -2.0, -1.0, 100.00, 100.0} };
+static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { 1.75, 1.25, 1.05, 1.00, 0.90 },
+ { 2.00, 1.50, 1.15, 1.00, 0.85 },
+ { 2.50, 1.75, 1.25, 1.00, 0.80 }
+};
+static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { 0.15, 0.30, 0.55, 2.00, 100.0 },
+ { 0.20, 0.40, 0.65, 2.00, 100.0 },
+ { 0.25, 0.50, 0.75, 2.00, 100.0 }
+};
+static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
+ { -4.0, -3.0, -2.0, 100.00, 100.0 },
+ { -3.5, -2.5, -1.5, 100.00, 100.0 },
+ { -3.0, -2.0, -1.0, 100.00, 100.0 }
+};
#define DEFAULT_COMPLEXITY 64
-
static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
  // Approximate base quantizer (truncated to int)
const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
@@ -81,14 +83,11 @@
for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
int qindex_delta;
- if (segment == DEFAULT_AQ2_SEG)
- continue;
+ if (segment == DEFAULT_AQ2_SEG) continue;
- qindex_delta =
- vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
- aq_c_q_adj_factor[aq_strength][segment],
- cm->bit_depth);
-
+ qindex_delta = vp10_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, cm->base_qindex,
+ aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
  // For AQ complexity mode, we don't allow Q0 in a segment if the base
// Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
@@ -112,7 +111,7 @@
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average and its spatial complexity.
void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
- int mi_row, int mi_col, int projected_rate) {
+ int mi_row, int mi_col, int projected_rate) {
VP10_COMMON *const cm = &cpi->common;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -129,26 +128,25 @@
} else {
// Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
// It is converted to bits * 256 units.
- const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
- (bw * bh);
+ const int target_rate =
+ (cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh);
double logvar;
double low_var_thresh;
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
vpx_clear_system_state();
- low_var_thresh = (cpi->oxcf.pass == 2)
- ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH)
- : DEFAULT_LV_THRESH;
+ low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
+ MIN_DEFAULT_LV_THRESH)
+ : DEFAULT_LV_THRESH;
vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
logvar = vp10_log_block_var(cpi, mb, bs);
- segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
+ segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
for (i = 0; i < AQ_C_SEGMENTS; ++i) {
// Test rate against a threshold value and variance against a threshold.
// Increasing segment number (higher variance and complexity) = higher Q.
- if ((projected_rate <
- target_rate * aq_c_transitions[aq_strength][i]) &&
+ if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) &&
(logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) {
segment = i;
break;
diff --git a/vp10/encoder/aq_complexity.h b/vp10/encoder/aq_complexity.h
index f9de2ad..5ed461b 100644
--- a/vp10/encoder/aq_complexity.h
+++ b/vp10/encoder/aq_complexity.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
#define VP10_ENCODER_AQ_COMPLEXITY_H_
@@ -23,8 +22,8 @@
// Select a segment for the current Block.
void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
- BLOCK_SIZE bs,
- int mi_row, int mi_col, int projected_rate);
+ BLOCK_SIZE bs, int mi_row, int mi_col,
+ int projected_rate);
// This function sets up a set of segments with delta Q values around
// the baseline frame quantizer.
diff --git a/vp10/encoder/aq_cyclicrefresh.c b/vp10/encoder/aq_cyclicrefresh.c
index 7a36d61..af49ee6 100644
--- a/vp10/encoder/aq_cyclicrefresh.c
+++ b/vp10/encoder/aq_cyclicrefresh.c
@@ -59,8 +59,7 @@
CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
size_t last_coded_q_map_size;
CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr));
- if (cr == NULL)
- return NULL;
+ if (cr == NULL) return NULL;
cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map));
if (cr->map == NULL) {
@@ -94,7 +93,7 @@
// Average bits available per frame = avg_frame_bandwidth
// Number of (8x8) blocks in frame = mi_rows * mi_cols;
const float factor = 0.25;
- const int number_blocks = cm->mi_rows * cm->mi_cols;
+ const int number_blocks = cm->mi_rows * cm->mi_cols;
// The condition below corresponds to turning off at target bitrates:
  // (at 30fps), ~12kbps for CIF, 36kbps for VGA, 100kbps for HD/720p.
// Also turn off at very small frame sizes, to avoid too large fraction of
@@ -111,10 +110,8 @@
// size of the coding block (i.e., below min_block size rejected), coding
// mode, and rate/distortion.
static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
- const MB_MODE_INFO *mbmi,
- int64_t rate,
- int64_t dist,
- int bsize) {
+ const MB_MODE_INFO *mbmi, int64_t rate,
+ int64_t dist, int bsize) {
MV mv = mbmi->mv[0].as_mv;
// Reject the block for lower-qp coding if projected distortion
// is above the threshold, and any of the following is true:
@@ -126,11 +123,9 @@
mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh ||
!is_inter_block(mbmi)))
return CR_SEGMENT_ID_BASE;
- else if (bsize >= BLOCK_16X16 &&
- rate < cr->thresh_rate_sb &&
- is_inter_block(mbmi) &&
- mbmi->mv[0].as_int == 0 &&
- cr->rate_boost_fac > 10)
+ else if (bsize >= BLOCK_16X16 && rate < cr->thresh_rate_sb &&
+ is_inter_block(mbmi) && mbmi->mv[0].as_int == 0 &&
+ cr->rate_boost_fac > 10)
// More aggressive delta-q for bigger blocks with zero motion.
return CR_SEGMENT_ID_BOOST2;
else
@@ -141,9 +136,8 @@
static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const RATE_CONTROL *const rc = &cpi->rc;
- int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type,
- q, rate_factor,
- cpi->common.bit_depth);
+ int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+ rate_factor, cpi->common.bit_depth);
if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
deltaq = -cr->max_qdelta_perc * q / 100;
}
@@ -155,7 +149,7 @@
// (with different delta-q). Note this function is called in the postencode
// (called from rc_update_rate_correction_factors()).
int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
- double correction_factor) {
+ double correction_factor) {
const VP10_COMMON *const cm = &cpi->common;
const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int estimated_bits;
@@ -166,17 +160,18 @@
double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl;
double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl;
// Take segment weighted average for estimated bits.
- estimated_bits = (int)((1.0 - weight_segment1 - weight_segment2) *
- vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
- correction_factor, cm->bit_depth) +
- weight_segment1 *
- vp10_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[1], mbs,
- correction_factor, cm->bit_depth) +
- weight_segment2 *
- vp10_estimate_bits_at_q(cm->frame_type,
- cm->base_qindex + cr->qindex_delta[2], mbs,
- correction_factor, cm->bit_depth));
+ estimated_bits =
+ (int)((1.0 - weight_segment1 - weight_segment2) *
+ vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+ correction_factor, cm->bit_depth) +
+ weight_segment1 *
+ vp10_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[1],
+ mbs, correction_factor, cm->bit_depth) +
+ weight_segment2 *
+ vp10_estimate_bits_at_q(cm->frame_type,
+ cm->base_qindex + cr->qindex_delta[2],
+ mbs, correction_factor, cm->bit_depth));
return estimated_bits;
}
@@ -186,24 +181,28 @@
// Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
// to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
- double correction_factor) {
+ double correction_factor) {
const VP10_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
int bits_per_mb;
int num8x8bl = cm->MBs << 2;
// Weight for segment prior to encoding: take the average of the target
// number for the frame to be encoded and the actual from the previous frame.
- double weight_segment = (double)((cr->target_num_seg_blocks +
- cr->actual_num_seg1_blocks + cr->actual_num_seg2_blocks) >> 1) /
+ double weight_segment =
+ (double)((cr->target_num_seg_blocks + cr->actual_num_seg1_blocks +
+ cr->actual_num_seg2_blocks) >>
+ 1) /
num8x8bl;
// Compute delta-q corresponding to qindex i.
int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
// Take segment weighted average for bits per mb.
- bits_per_mb = (int)((1.0 - weight_segment) *
- vp10_rc_bits_per_mb(cm->frame_type, i, correction_factor, cm->bit_depth) +
- weight_segment *
- vp10_rc_bits_per_mb(cm->frame_type, i + deltaq, correction_factor,
- cm->bit_depth));
+ bits_per_mb =
+ (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
+ correction_factor,
+ cm->bit_depth) +
+ weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
+ correction_factor,
+ cm->bit_depth));
return bits_per_mb;
}
@@ -211,12 +210,9 @@
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
- MB_MODE_INFO *const mbmi,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- int64_t rate,
- int64_t dist,
- int skip) {
+ MB_MODE_INFO *const mbmi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist, int skip) {
const VP10_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -224,19 +220,19 @@
const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
const int block_index = mi_row * cm->mi_cols + mi_col;
- const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist,
- bsize);
+ const int refresh_this_block =
+ candidate_refresh_aq(cr, mbmi, rate, dist, bsize);
// Default is to not update the refresh map.
int new_map_value = cr->map[block_index];
- int x = 0; int y = 0;
+ int x = 0;
+ int y = 0;
// If this block is labeled for refresh, check if we should reset the
// segment_id.
if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) {
mbmi->segment_id = refresh_this_block;
    // Reset segment_id if the block will be skipped.
- if (skip)
- mbmi->segment_id = CR_SEGMENT_ID_BASE;
+ if (skip) mbmi->segment_id = CR_SEGMENT_ID_BASE;
}
// Update the cyclic refresh map, to be used for setting segmentation map
@@ -249,8 +245,7 @@
// Else if it is accepted as candidate for refresh, and has not already
// been refreshed (marked as 1) then mark it as a candidate for cleanup
// for future time (marked as 0), otherwise don't update it.
- if (cr->map[block_index] == 1)
- new_map_value = 0;
+ if (cr->map[block_index] == 1) new_map_value = 0;
} else {
// Leave it marked as block that is not candidate for refresh.
new_map_value = 1;
@@ -283,11 +278,12 @@
cr->actual_num_seg2_blocks = 0;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row++)
for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) {
- if (cyclic_refresh_segment_id(
- seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST1)
+ if (cyclic_refresh_segment_id(seg_map[mi_row * cm->mi_cols + mi_col]) ==
+ CR_SEGMENT_ID_BOOST1)
cr->actual_num_seg1_blocks++;
else if (cyclic_refresh_segment_id(
- seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST2)
+ seg_map[mi_row * cm->mi_cols + mi_col]) ==
+ CR_SEGMENT_ID_BOOST2)
cr->actual_num_seg2_blocks++;
}
}
@@ -324,22 +320,22 @@
for (mi_row = 0; mi_row < rows; mi_row++) {
for (mi_col = 0; mi_col < cols; mi_col++) {
- int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0 ?
- mi[0]->mbmi.mv[0].as_mv.row : -1 * mi[0]->mbmi.mv[0].as_mv.row;
- int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0 ?
- mi[0]->mbmi.mv[0].as_mv.col : -1 * mi[0]->mbmi.mv[0].as_mv.col;
+ int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0
+ ? mi[0]->mbmi.mv[0].as_mv.row
+ : -1 * mi[0]->mbmi.mv[0].as_mv.row;
+ int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0
+ ? mi[0]->mbmi.mv[0].as_mv.col
+ : -1 * mi[0]->mbmi.mv[0].as_mv.col;
// Calculate the motion of the background.
if (abs_mvr <= 16 && abs_mvc <= 16) {
cnt1++;
- if (abs_mvr == 0 && abs_mvc == 0)
- cnt2++;
+ if (abs_mvr == 0 && abs_mvc == 0) cnt2++;
}
mi++;
// Accumulate low_content_frame.
- if (cr->map[mi_row * cols + mi_col] < 1)
- low_content_frame++;
+ if (cr->map[mi_row * cols + mi_col] < 1) low_content_frame++;
}
mi += 8;
}
@@ -350,7 +346,7 @@
// Also, force this frame as a golden update frame if this frame will change
// the resolution (resize_pending != 0).
if (cpi->resize_pending != 0 ||
- (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
+ (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
vp10_cyclic_refresh_set_golden_update(cpi);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
@@ -360,8 +356,7 @@
force_gf_refresh = 1;
}
- fraction_low =
- (double)low_content_frame / (rows * cols);
+ fraction_low = (double)low_content_frame / (rows * cols);
// Update average.
cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4;
if (!force_gf_refresh && cpi->refresh_golden_frame == 1) {
@@ -425,8 +420,7 @@
      // for possible boost/refresh (segment 1). The segment id may get
      // reset to 0 later if the block is coded as anything other than ZEROMV.
if (cr->map[bl_index2] == 0) {
- if (cr->last_coded_q_map[bl_index2] > qindex_thresh)
- sum_map++;
+ if (cr->last_coded_q_map[bl_index2] > qindex_thresh) sum_map++;
} else if (cr->map[bl_index2] < 0) {
cr->map[bl_index2]++;
}
@@ -459,14 +453,12 @@
cr->time_for_refresh = 0;
// Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4)
// periods of the refresh cycle, after a key frame.
- if (rc->frames_since_key < 4 * cr->percent_refresh)
+ if (rc->frames_since_key < 4 * cr->percent_refresh)
cr->rate_ratio_qdelta = 3.0;
else
cr->rate_ratio_qdelta = 2.0;
// Adjust some parameters for low resolutions at low bitrates.
- if (cm->width <= 352 &&
- cm->height <= 288 &&
- rc->avg_frame_bandwidth < 3400) {
+ if (cm->width <= 352 && cm->height <= 288 && rc->avg_frame_bandwidth < 3400) {
cr->motion_thresh = 4;
cr->rate_boost_fac = 10;
} else {
@@ -481,9 +473,8 @@
const RATE_CONTROL *const rc = &cpi->rc;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
struct segmentation *const seg = &cm->seg;
- const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
- if (cm->current_video_frame == 0)
- cr->low_content_avg = 0.0;
+ const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc);
+ if (cm->current_video_frame == 0) cr->low_content_avg = 0.0;
// Don't apply refresh on key frame or enhancement layer frames.
if (!apply_cyclic_refresh || cm->frame_type == KEY_FRAME) {
// Set segmentation map to 0 and disable.
@@ -517,7 +508,8 @@
seg->abs_delta = SEGMENT_DELTADATA;
// Note: setting temporal_update has no effect, as the seg-map coding method
- // (temporal or spatial) is determined in vp10_choose_segmap_coding_method(),
+ // (temporal or spatial) is determined in
+ // vp10_choose_segmap_coding_method(),
  // based on the coding cost of each method. With error_resilient mode on,
  // the last_frame_seg_map is set to 0, so if temporal coding is used, it is
  // relative to an all-zero previous map.
diff --git a/vp10/encoder/aq_cyclicrefresh.h b/vp10/encoder/aq_cyclicrefresh.h
index f6714c5..649d714 100644
--- a/vp10/encoder/aq_cyclicrefresh.h
+++ b/vp10/encoder/aq_cyclicrefresh.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
@@ -20,9 +19,9 @@
// The segment ids used in cyclic refresh: from base (no boost) to increasing
// boost (higher delta-qp).
-#define CR_SEGMENT_ID_BASE 0
-#define CR_SEGMENT_ID_BOOST1 1
-#define CR_SEGMENT_ID_BOOST2 2
+#define CR_SEGMENT_ID_BASE 0
+#define CR_SEGMENT_ID_BOOST1 1
+#define CR_SEGMENT_ID_BOOST2 2
// Maximum rate target ratio for setting segment delta-qp.
#define CR_MAX_RATE_TARGET_RATIO 4.0
@@ -39,20 +38,20 @@
// Estimate the bits, incorporating the delta-q from segment 1, after encoding
// the frame.
int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
- double correction_factor);
+ double correction_factor);
// Estimate the bits per mb, for a given q = i and a corresponding delta-q
// (for segment 1), prior to encoding the frame.
int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
- double correction_factor);
+ double correction_factor);
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
- MB_MODE_INFO *const mbmi,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- int64_t rate, int64_t dist, int skip);
+ MB_MODE_INFO *const mbmi, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist, int skip);
// Update the segmentation map, and related quantities: cyclic refresh map,
// refresh sb_index, and target number of blocks to be refreshed.
diff --git a/vp10/encoder/aq_variance.c b/vp10/encoder/aq_variance.c
index 15ab6b4..520640a 100644
--- a/vp10/encoder/aq_variance.c
+++ b/vp10/encoder/aq_variance.c
@@ -22,19 +22,19 @@
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
-#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
-#define ENERGY_IN_BOUNDS(energy)\
+#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
+#define ENERGY_IN_BOUNDS(energy) \
assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
-static const double rate_ratio[MAX_SEGMENTS] =
- {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0};
-static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4};
+static const double rate_ratio[MAX_SEGMENTS] = { 2.5, 2.0, 1.5, 1.0,
+ 0.75, 1.0, 1.0, 1.0 };
+static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
-#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN]
+#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
-DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = { 0 };
#if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = {0};
+DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = { 0 };
#endif
unsigned int vp10_vaq_segment_id(int energy) {
@@ -60,7 +60,7 @@
for (i = 0; i < MAX_SEGMENTS; ++i) {
int qindex_delta =
vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
- rate_ratio[i], cm->bit_depth);
+ rate_ratio[i], cm->bit_depth);
// We don't allow qindex 0 in a segment if the base value is not 0.
// Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment
@@ -84,9 +84,9 @@
/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
* of variance() and highbd_8_variance(). It should not.
*/
-static void aq_variance(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int w, int h, unsigned int *sse,
+ int *sum) {
int i, j;
*sum = 0;
@@ -105,9 +105,9 @@
}
#if CONFIG_VPX_HIGHBITDEPTH
-static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h, uint64_t *sse, uint64_t *sum) {
+static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w, int h,
+ uint64_t *sse, uint64_t *sum) {
int i, j;
uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -126,9 +126,9 @@
}
}
-static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w, int h,
+ unsigned int *sse, int *sum) {
uint64_t sse_long = 0;
uint64_t sum_long = 0;
aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -141,10 +141,10 @@
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
- int right_overflow = (xd->mb_to_right_edge < 0) ?
- ((-xd->mb_to_right_edge) >> 3) : 0;
- int bottom_overflow = (xd->mb_to_bottom_edge < 0) ?
- ((-xd->mb_to_bottom_edge) >> 3) : 0;
+ int right_overflow =
+ (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
+ int bottom_overflow =
+ (xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
if (right_overflow || bottom_overflow) {
const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
@@ -158,30 +158,27 @@
sse >>= 2 * (xd->bd - 8);
avg >>= (xd->bd - 8);
} else {
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_64_zeros, 0, bw, bh, &sse, &avg);
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+ bw, bh, &sse, &avg);
}
#else
- aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
- vp10_64_zeros, 0, bw, bh, &sse, &avg);
+ aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+ bw, bh, &sse, &avg);
#endif // CONFIG_VPX_HIGHBITDEPTH
var = sse - (((int64_t)avg * avg) / (bw * bh));
return (256 * var) / (bw * bh);
} else {
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
+ CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, &sse);
} else {
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
+ var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp10_64_zeros, 0, &sse);
}
#else
- var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
- x->plane[0].src.stride,
+ var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp10_64_zeros, 0, &sse);
#endif // CONFIG_VPX_HIGHBITDEPTH
return (256 * var) >> num_pels_log2_lookup[bs];
@@ -200,7 +197,7 @@
double energy_midpoint;
vpx_clear_system_state();
energy_midpoint =
- (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
+ (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
}
diff --git a/vp10/encoder/aq_variance.h b/vp10/encoder/aq_variance.h
index 318f5f2..cfb5b86 100644
--- a/vp10/encoder/aq_variance.h
+++ b/vp10/encoder/aq_variance.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_AQ_VARIANCE_H_
#define VP10_ENCODER_AQ_VARIANCE_H_
diff --git a/vp10/encoder/arm/neon/dct_neon.c b/vp10/encoder/arm/neon/dct_neon.c
index b37a2ff..e83853f 100644
--- a/vp10/encoder/arm/neon/dct_neon.c
+++ b/vp10/encoder/arm/neon/dct_neon.c
@@ -17,20 +17,17 @@
#include "vp10/common/blockd.h"
#include "vpx_dsp/txfm_common.h"
-void vp10_fdct8x8_quant_neon(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr,
- int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
- const int16_t* dequant_ptr, uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_neon(
+ const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
+ const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
+ int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+ uint16_t* eob_ptr, const int16_t* scan_ptr, const int16_t* iscan_ptr) {
int16_t temp_buffer[64];
(void)coeff_ptr;
vpx_fdct8x8_neon(input, temp_buffer, stride);
vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
- quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
- dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+ quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+ dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
}
diff --git a/vp10/encoder/arm/neon/error_neon.c b/vp10/encoder/arm/neon/error_neon.c
index 009520a..34805d3 100644
--- a/vp10/encoder/arm/neon/error_neon.c
+++ b/vp10/encoder/arm/neon/error_neon.c
@@ -14,7 +14,7 @@
#include "./vp10_rtcd.h"
int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
- int block_size) {
+ int block_size) {
int64x2_t error = vdupq_n_s64(0);
assert(block_size >= 8);
diff --git a/vp10/encoder/arm/neon/quantize_neon.c b/vp10/encoder/arm/neon/quantize_neon.c
index 9354ced..7b8e447 100644
--- a/vp10/encoder/arm/neon/quantize_neon.c
+++ b/vp10/encoder/arm/neon/quantize_neon.c
@@ -22,12 +22,12 @@
#include "vp10/encoder/rd.h"
void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
- int skip_block, const int16_t *zbin_ptr,
- const int16_t *round_ptr, const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
- int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
  // TODO(jingning) Decide whether these arguments are needed after the
  // quantization process is completed.
(void)zbin_ptr;
@@ -54,12 +54,12 @@
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
- const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
- vget_low_s16(v_quant));
- const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
- vget_high_s16(v_quant));
- const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
- vshrn_n_s32(v_tmp_hi, 16));
+ const int32x4_t v_tmp_lo =
+ vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi =
+ vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 =
+ vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -79,12 +79,12 @@
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
- const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
- vget_low_s16(v_quant));
- const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
- vget_high_s16(v_quant));
- const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
- vshrn_n_s32(v_tmp_hi, 16));
+ const int32x4_t v_tmp_lo =
+ vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
+ const int32x4_t v_tmp_hi =
+ vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
+ const int16x8_t v_tmp2 =
+ vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -96,9 +96,8 @@
vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
}
{
- const int16x4_t v_eobmax_3210 =
- vmax_s16(vget_low_s16(v_eobmax_76543210),
- vget_high_s16(v_eobmax_76543210));
+ const int16x4_t v_eobmax_3210 = vmax_s16(
+ vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
const int64x1_t v_eobmax_xx32 =
vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
const int16x4_t v_eobmax_tmp =
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 0b26458..0fd3d5a 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -36,20 +36,22 @@
#include "vp10/encoder/tokenize.h"
static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
- {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
- {62, 6}, {2, 2}};
+ { 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
+ { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
+};
static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
- {{0, 1}, {2, 2}, {3, 2}};
-static const struct vp10_token partition_encodings[PARTITION_TYPES] =
- {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
-static const struct vp10_token inter_mode_encodings[INTER_MODES] =
- {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
+ { { 0, 1 }, { 2, 2 }, { 3, 2 } };
+static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
+};
+static const struct vp10_token inter_mode_encodings[INTER_MODES] = {
+ { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
+};
static INLINE void write_uniform(vpx_writer *w, int n, int v) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
- if (l == 0)
- return;
+ if (l == 0) return;
if (v < m) {
vpx_write_literal(w, v, l - 1);
} else {
@@ -73,18 +75,18 @@
const vpx_prob *probs) {
assert(is_inter_mode(mode));
vp10_write_token(w, vp10_inter_mode_tree, probs,
- &inter_mode_encodings[INTER_OFFSET(mode)]);
+ &inter_mode_encodings[INTER_OFFSET(mode)]);
}
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb,
- int data, int max) {
+static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+ int max) {
vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
}
static void prob_diff_update(const vpx_tree_index *tree,
vpx_prob probs[/*n - 1*/],
- const unsigned int counts[/*n - 1*/],
- int n, vpx_writer *w) {
+ const unsigned int counts[/*n - 1*/], int n,
+ vpx_writer *w) {
int i;
unsigned int branch_ct[32][2];
@@ -108,19 +110,18 @@
assert(n <= 32);
vp10_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i) {
- savings += vp10_cond_prob_diff_update_savings(&probs[i],
- branch_ct[i]);
+ savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
}
return savings;
}
-static void write_selected_tx_size(const VP10_COMMON *cm,
- const MACROBLOCKD *xd, vpx_writer *w) {
+static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+ vpx_writer *w) {
TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
- const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
- &cm->fc->tx_probs);
+ const vpx_prob *const tx_probs =
+ get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
vpx_write(w, tx_size != TX_4X4, tx_probs[0]);
if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
vpx_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -175,36 +176,32 @@
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- prob_diff_update(vp10_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[i][j],
- cm->counts.intra_ext_tx[i][j],
- TX_TYPES, w);
+ prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
}
}
savings = 0;
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- savings += prob_diff_update_savings(
- vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
- cm->counts.inter_ext_tx[i], TX_TYPES);
+ savings +=
+ prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ cm->counts.inter_ext_tx[i], TX_TYPES);
}
do_update = savings > savings_thresh;
vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- prob_diff_update(vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[i],
- cm->counts.inter_ext_tx[i],
- TX_TYPES, w);
+ prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ cm->counts.inter_ext_tx[i], TX_TYPES, w);
}
}
}
-static void pack_mb_tokens(vpx_writer *w,
- TOKENEXTRA **tp, const TOKENEXTRA *const stop,
+static void pack_mb_tokens(vpx_writer *w, TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop,
vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
TOKENEXTRA *p = *tp;
#if !CONFIG_MISC_FIXES
- (void) tx;
+ (void)tx;
#endif
while (p < stop && p->token != EOSB_TOKEN) {
@@ -223,7 +220,7 @@
b = &vp10_extra_bits[t];
#else
const vp10_extra_bit *const b = &vp10_extra_bits[t];
- (void) bit_depth;
+ (void)bit_depth;
#endif // CONFIG_VPX_HIGHBITDEPTH
/* skip one or two nodes */
@@ -245,8 +242,8 @@
int bits = v >> (n - len);
vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
vp10_write_tree(w, vp10_coef_con_tree,
- vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
- v, n - len, 0);
+ vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+ n - len, 0);
} else {
vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
}
@@ -254,8 +251,7 @@
if (b->base_val) {
const int e = p->extra, l = b->len;
#if CONFIG_MISC_FIXES
- int skip_bits =
- (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
+ int skip_bits = (b->base_val == CAT6_MIN_VAL) ? TX_SIZES - 1 - tx : 0;
#else
int skip_bits = 0;
#endif
@@ -263,7 +259,7 @@
if (l) {
const unsigned char *pb = b->prob;
int v = e >> 1;
- int n = l; /* number of bits in v, assumed nonzero */
+ int n = l; /* number of bits in v, assumed nonzero */
int i = 0;
do {
@@ -305,7 +301,7 @@
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
assert(!is_compound);
assert(mbmi->ref_frame[0] ==
- get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
+ get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
} else {
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
@@ -356,8 +352,7 @@
const int pred_flag = mbmi->seg_id_predicted;
vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
vpx_write(w, pred_flag, pred_prob);
- if (!pred_flag)
- write_segment_id(w, seg, segp, segment_id);
+ if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
} else {
write_segment_id(w, seg, segp, segment_id);
}
@@ -403,8 +398,8 @@
if (cm->interp_filter == SWITCHABLE) {
const int ctx = vp10_get_pred_context_switchable_interp(xd);
vp10_write_token(w, vp10_switchable_interp_tree,
- cm->fc->switchable_interp_prob[ctx],
- &switchable_interp_encodings[mbmi->interp_filter]);
+ cm->fc->switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mbmi->interp_filter]);
++cpi->interp_filter_selected[0][mbmi->interp_filter];
} else {
assert(mbmi->interp_filter == cm->interp_filter);
@@ -422,8 +417,8 @@
if (b_mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
- nmvc, allow_hp);
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+ nmvc, allow_hp);
}
}
}
@@ -431,19 +426,17 @@
if (mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
- allow_hp);
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+ nmvc, allow_hp);
}
}
}
- if (mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 && !mbmi->skip &&
+ if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
if (is_inter) {
- vp10_write_token(
- w, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[mbmi->tx_size],
- &ext_tx_encodings[mbmi->tx_type]);
+ vp10_write_token(w, vp10_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+ &ext_tx_encodings[mbmi->tx_type]);
} else {
vp10_write_token(
w, vp10_ext_tx_tree,
@@ -452,8 +445,7 @@
&ext_tx_encodings[mbmi->tx_type]);
}
} else {
- if (!mbmi->skip)
- assert(mbmi->tx_type == DCT_DCT);
+ if (!mbmi->skip) assert(mbmi->tx_type == DCT_DCT);
}
}
@@ -471,8 +463,7 @@
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- if (seg->update_map)
- write_segment_id(w, seg, segp, mbmi->segment_id);
+ if (seg->update_map) write_segment_id(w, seg, segp, mbmi->segment_id);
write_skip(cm, xd, mbmi->segment_id, mi, w);
@@ -499,8 +490,7 @@
write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]);
- if (mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 && !mbmi->skip &&
+ if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
vp10_write_token(
w, vp10_ext_tx_tree,
@@ -512,8 +502,8 @@
static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
vpx_writer *w, TOKENEXTRA **tok,
- const TOKENEXTRA *const tok_end,
- int mi_row, int mi_col) {
+ const TOKENEXTRA *const tok_end, int mi_row,
+ int mi_col) {
const VP10_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
@@ -524,8 +514,7 @@
cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
- set_mi_row_col(xd, tile,
- mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
+ set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
cm->mi_rows, cm->mi_cols);
if (frame_is_intra_only(cm)) {
@@ -537,8 +526,8 @@
if (!m->mbmi.skip) {
assert(*tok < tok_end);
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
- : m->mbmi.tx_size;
+ TX_SIZE tx =
+ plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane]) : m->mbmi.tx_size;
pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx);
assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
(*tok)++;
@@ -547,9 +536,9 @@
}
static void write_partition(const VP10_COMMON *const cm,
- const MACROBLOCKD *const xd,
- int hbs, int mi_row, int mi_col,
- PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) {
+ const MACROBLOCKD *const xd, int hbs, int mi_row,
+ int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
+ vpx_writer *w) {
const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
const vpx_prob *const probs = cm->fc->partition_prob[ctx];
const int has_rows = (mi_row + hbs) < cm->mi_rows;
@@ -568,10 +557,10 @@
}
}
-static void write_modes_sb(VP10_COMP *cpi,
- const TileInfo *const tile, vpx_writer *w,
- TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
- int mi_row, int mi_col, BLOCK_SIZE bsize) {
+static void write_modes_sb(VP10_COMP *cpi, const TileInfo *const tile,
+ vpx_writer *w, TOKENEXTRA **tok,
+ const TOKENEXTRA *const tok_end, int mi_row,
+ int mi_col, BLOCK_SIZE bsize) {
const VP10_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
@@ -581,8 +570,7 @@
BLOCK_SIZE subsize;
const MODE_INFO *m = NULL;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
@@ -615,8 +603,7 @@
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
subsize);
break;
- default:
- assert(0);
+ default: assert(0);
}
}
@@ -626,9 +613,9 @@
update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
-static void write_modes(VP10_COMP *cpi,
- const TileInfo *const tile, vpx_writer *w,
- TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
+static void write_modes(VP10_COMP *cpi, const TileInfo *const tile,
+ vpx_writer *w, TOKENEXTRA **tok,
+ const TOKENEXTRA *const tok_end) {
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
int mi_row, mi_col;
@@ -637,8 +624,7 @@
vp10_zero(xd->left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE)
- write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
- BLOCK_64X64);
+ write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
}
}
@@ -646,7 +632,7 @@
vp10_coeff_stats *coef_branch_ct,
vp10_coeff_probs_model *coef_probs) {
vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
- unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
+ unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cpi->common.counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -655,21 +641,21 @@
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
vp10_tree_probs_from_distribution(vp10_coef_tree,
- coef_branch_ct[i][j][k][l],
- coef_counts[i][j][k][l]);
- coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
- coef_branch_ct[i][j][k][l][0][0];
+ coef_branch_ct[i][j][k][l],
+ coef_counts[i][j][k][l]);
+ coef_branch_ct[i][j][k][l][0][1] =
+ eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
- coef_probs[i][j][k][l][m] = get_binary_prob(
- coef_branch_ct[i][j][k][l][m][0],
- coef_branch_ct[i][j][k][l][m][1]);
+ coef_probs[i][j][k][l][m] =
+ get_binary_prob(coef_branch_ct[i][j][k][l][m][0],
+ coef_branch_ct[i][j][k][l][m][1]);
}
}
}
}
}
-static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(vpx_writer *const bc, VP10_COMP *cpi,
TX_SIZE tx_size,
vp10_coeff_stats *frame_branch_ct,
vp10_coeff_probs_model *new_coef_probs) {
@@ -683,7 +669,7 @@
case TWO_LOOP: {
/* dry run to see if there is any update at all needed */
int savings = 0;
- int update[2] = {0, 0};
+ int update[2] = { 0, 0 };
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
@@ -700,8 +686,7 @@
else
s = vp10_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
- if (s > 0 && newp != oldp)
- u = 1;
+ if (s > 0 && newp != oldp) u = 1;
if (u)
savings += s - (int)(vp10_cost_zero(upd));
else
@@ -737,10 +722,8 @@
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
s = vp10_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t],
- *oldp, &newp, upd);
- if (s > 0 && newp != *oldp)
- u = 1;
+ frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp) u = 1;
vpx_write(bc, u, upd);
if (u) {
/* send/use new probability */
@@ -775,12 +758,10 @@
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
} else {
s = vp10_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t],
- *oldp, &newp, upd);
+ frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
- if (s > 0 && newp != *oldp)
- u = 1;
+ if (s > 0 && newp != *oldp) u = 1;
updates += u;
if (u == 0 && updates == 0) {
noupdates_before_first++;
@@ -809,12 +790,11 @@
}
return;
}
- default:
- assert(0);
+ default: assert(0);
}
}
-static void update_coef_probs(VP10_COMP *cpi, vpx_writer* w) {
+static void update_coef_probs(VP10_COMP *cpi, vpx_writer *w) {
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
@@ -825,8 +805,7 @@
(tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
vpx_write_bit(w, 0);
} else {
- build_tree_distribution(cpi, tx_size, frame_branch_ct,
- frame_coef_probs);
+ build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs);
update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
frame_coef_probs);
}
@@ -898,8 +877,7 @@
#endif
vpx_wb_write_bit(wb, seg->enabled);
- if (!seg->enabled)
- return;
+ if (!seg->enabled) return;
// Segmentation map
if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
@@ -916,8 +894,7 @@
const int prob = segp->tree_probs[i];
const int update = prob != MAX_PROB;
vpx_wb_write_bit(wb, update);
- if (update)
- vpx_wb_write_literal(wb, prob, 8);
+ if (update) vpx_wb_write_literal(wb, prob, 8);
}
#endif
@@ -934,8 +911,7 @@
const int prob = segp->pred_probs[i];
const int update = prob != MAX_PROB;
vpx_wb_write_bit(wb, update);
- if (update)
- vpx_wb_write_literal(wb, prob, 8);
+ if (update) vpx_wb_write_literal(wb, prob, 8);
}
}
#endif
@@ -970,48 +946,42 @@
static void update_seg_probs(VP10_COMP *cpi, vpx_writer *w) {
VP10_COMMON *cm = &cpi->common;
- if (!cpi->common.seg.enabled)
- return;
+ if (!cpi->common.seg.enabled) return;
if (cpi->common.seg.temporal_update) {
int i;
for (i = 0; i < PREDICTION_PROBS; i++)
vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
- cm->counts.seg.pred[i]);
+ cm->counts.seg.pred[i]);
prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
- cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
+ cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
} else {
prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
- cm->counts.seg.tree_total, MAX_SEGMENTS, w);
+ cm->counts.seg.tree_total, MAX_SEGMENTS, w);
}
}
static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
- if (mode != TX_MODE_SELECT)
- vpx_wb_write_literal(wb, mode, 2);
+ if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
}
#else
static void write_txfm_mode(TX_MODE mode, struct vpx_writer *wb) {
vpx_write_literal(wb, VPXMIN(mode, ALLOW_32X32), 2);
- if (mode >= ALLOW_32X32)
- vpx_write_bit(wb, mode == TX_MODE_SELECT);
+ if (mode >= ALLOW_32X32) vpx_write_bit(wb, mode == TX_MODE_SELECT);
}
#endif
-
static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w,
FRAME_COUNTS *counts) {
-
if (cm->tx_mode == TX_MODE_SELECT) {
int i, j;
unsigned int ct_8x8p[TX_SIZES - 3][2];
unsigned int ct_16x16p[TX_SIZES - 2][2];
unsigned int ct_32x32p[TX_SIZES - 1][2];
-
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
for (j = 0; j < TX_SIZES - 3; j++)
@@ -1022,14 +992,14 @@
vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
for (j = 0; j < TX_SIZES - 2; j++)
vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
- ct_16x16p[j]);
+ ct_16x16p[j]);
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
for (j = 0; j < TX_SIZES - 1; j++)
vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
- ct_32x32p[j]);
+ ct_32x32p[j]);
}
}
}
@@ -1037,8 +1007,7 @@
static void write_interp_filter(INTERP_FILTER filter,
struct vpx_write_bit_buffer *wb) {
vpx_wb_write_bit(wb, filter == SWITCHABLE);
- if (filter != SWITCHABLE)
- vpx_wb_write_literal(wb, filter, 2);
+ if (filter != SWITCHABLE) vpx_wb_write_literal(wb, filter, 2);
}
static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
@@ -1071,16 +1040,13 @@
// columns
ones = cm->log2_tile_cols - min_log2_tile_cols;
- while (ones--)
- vpx_wb_write_bit(wb, 1);
+ while (ones--) vpx_wb_write_bit(wb, 1);
- if (cm->log2_tile_cols < max_log2_tile_cols)
- vpx_wb_write_bit(wb, 0);
+ if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
// rows
vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
- if (cm->log2_tile_rows != 0)
- vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+ if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
static int get_refresh_mask(VP10_COMP *cpi) {
@@ -1129,15 +1095,15 @@
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
tok_end = cpi->tile_tok[tile_row][tile_col] +
- cpi->tok_count[tile_row][tile_col];
+ cpi->tok_count[tile_row][tile_col];
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
else
vpx_start_encode(&residual_bc, data_ptr + total_size);
- write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
- &residual_bc, &tok, tok_end);
+ write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &residual_bc, &tok,
+ tok_end);
assert(tok == tok_end);
vpx_stop_encode(&residual_bc);
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
@@ -1161,8 +1127,8 @@
static void write_render_size(const VP10_COMMON *cm,
struct vpx_write_bit_buffer *wb) {
- const int scaling_active = cm->width != cm->render_width ||
- cm->height != cm->render_height;
+ const int scaling_active =
+ cm->width != cm->render_width || cm->height != cm->render_height;
vpx_wb_write_bit(wb, scaling_active);
if (scaling_active) {
vpx_wb_write_literal(wb, cm->render_width - 1, 16);
@@ -1188,8 +1154,8 @@
YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
if (cfg != NULL) {
- found = cm->width == cfg->y_crop_width &&
- cm->height == cfg->y_crop_height;
+ found =
+ cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height;
#if CONFIG_MISC_FIXES
found &= cm->render_width == cfg->render_width &&
cm->render_height == cfg->render_height;
@@ -1224,20 +1190,11 @@
static void write_profile(BITSTREAM_PROFILE profile,
struct vpx_write_bit_buffer *wb) {
switch (profile) {
- case PROFILE_0:
- vpx_wb_write_literal(wb, 0, 2);
- break;
- case PROFILE_1:
- vpx_wb_write_literal(wb, 2, 2);
- break;
- case PROFILE_2:
- vpx_wb_write_literal(wb, 1, 2);
- break;
- case PROFILE_3:
- vpx_wb_write_literal(wb, 6, 3);
- break;
- default:
- assert(0);
+ case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
+ case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
+ case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
+ case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+ default: assert(0);
}
}
@@ -1284,8 +1241,7 @@
write_bitdepth_colorspace_sampling(cm, wb);
write_frame_size(cm, wb);
} else {
- if (!cm->show_frame)
- vpx_wb_write_bit(wb, cm->intra_only);
+ if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
if (!cm->error_resilient_mode) {
#if CONFIG_MISC_FIXES
@@ -1302,8 +1258,8 @@
#else
static const int reset_frame_context_conv_tbl[3] = { 0, 2, 3 };
- vpx_wb_write_literal(wb,
- reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
+ vpx_wb_write_literal(
+ wb, reset_frame_context_conv_tbl[cm->reset_frame_context], 2);
#endif
}
@@ -1346,8 +1302,8 @@
#if CONFIG_MISC_FIXES
if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF)
#endif
- vpx_wb_write_bit(wb, cm->refresh_frame_context !=
- REFRESH_FRAME_CONTEXT_BACKWARD);
+ vpx_wb_write_bit(
+ wb, cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD);
}
vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
@@ -1365,8 +1321,7 @@
const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
vpx_wb_write_bit(wb, use_hybrid_pred);
- if (!use_hybrid_pred)
- vpx_wb_write_bit(wb, use_compound_pred);
+ if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
}
#endif
@@ -1427,7 +1382,7 @@
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
- counts->intra_inter[i]);
+ counts->intra_inter[i]);
if (cpi->allow_comp_inter_inter) {
const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
@@ -1440,7 +1395,7 @@
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
- counts->comp_inter[i]);
+ counts->comp_inter[i]);
}
#else
if (use_hybrid_pred)
@@ -1453,16 +1408,16 @@
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; i++) {
vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
- counts->single_ref[i][0]);
+ counts->single_ref[i][0]);
vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
- counts->single_ref[i][1]);
+ counts->single_ref[i][1]);
}
}
if (cm->reference_mode != SINGLE_REFERENCE)
for (i = 0; i < REF_CONTEXTS; i++)
vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
- counts->comp_ref[i]);
+ counts->comp_ref[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
@@ -1475,7 +1430,7 @@
#endif
vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
- &counts->mv);
+ &counts->mv);
update_ext_tx_probs(cm, &header_bc);
}
@@ -1486,8 +1441,8 @@
}
#if CONFIG_MISC_FIXES
-static int remux_tiles(uint8_t *dest, const int sz,
- const int n_tiles, const int mag) {
+static int remux_tiles(uint8_t *dest, const int sz, const int n_tiles,
+ const int mag) {
int rpos = 0, wpos = 0, n;
for (n = 0; n < n_tiles; n++) {
@@ -1499,18 +1454,11 @@
tile_sz = mem_get_le32(&dest[rpos]) + 1;
rpos += 4;
switch (mag) {
- case 0:
- dest[wpos] = tile_sz - 1;
- break;
- case 1:
- mem_put_le16(&dest[wpos], tile_sz - 1);
- break;
- case 2:
- mem_put_le24(&dest[wpos], tile_sz - 1);
- break;
+ case 0: dest[wpos] = tile_sz - 1; break;
+ case 1: mem_put_le16(&dest[wpos], tile_sz - 1); break;
+ case 2: mem_put_le24(&dest[wpos], tile_sz - 1); break;
case 3: // remuxing should only happen if mag < 3
- default:
- assert("Invalid value for tile size magnitude" && 0);
+ default: assert("Invalid value for tile size magnitude" && 0);
}
wpos += mag + 1;
}
@@ -1530,7 +1478,7 @@
void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
uint8_t *data = dest;
size_t first_part_size, uncompressed_hdr_size, data_sz;
- struct vpx_write_bit_buffer wb = {data, 0};
+ struct vpx_write_bit_buffer wb = { data, 0 };
struct vpx_write_bit_buffer saved_wb;
unsigned int max_tile;
#if CONFIG_MISC_FIXES
@@ -1563,8 +1511,7 @@
// Choose the (tile size) magnitude
for (mag = 0, mask = 0xff; mag < 4; mag++) {
- if (max_tile <= mask)
- break;
+ if (max_tile <= mask) break;
mask <<= 8;
mask |= 0xff;
}
diff --git a/vp10/encoder/bitstream.h b/vp10/encoder/bitstream.h
index b1da89f..ab692e8 100644
--- a/vp10/encoder/bitstream.h
+++ b/vp10/encoder/bitstream.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_BITSTREAM_H_
#define VP10_ENCODER_BITSTREAM_H_
diff --git a/vp10/encoder/block.h b/vp10/encoder/block.h
index ab0252b..3d35485 100644
--- a/vp10/encoder/block.h
+++ b/vp10/encoder/block.h
@@ -45,7 +45,7 @@
/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
* coefficient in this block was zero) or not. */
typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
- [COEFF_CONTEXTS][ENTROPY_TOKENS];
+ [COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef struct {
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
@@ -70,8 +70,8 @@
int rddiv;
int rdmult;
int mb_energy;
- int * m_search_count_ptr;
- int * ex_search_count_ptr;
+ int *m_search_count_ptr;
+ int *ex_search_count_ptr;
// These are set to their default values at the beginning, and then adjusted
// further in the encoding process.
@@ -122,9 +122,9 @@
// skip forward transform and quantization
uint8_t skip_txfm[MAX_MB_PLANE << 2];
- #define SKIP_TXFM_NONE 0
- #define SKIP_TXFM_AC_DC 1
- #define SKIP_TXFM_AC_ONLY 2
+#define SKIP_TXFM_NONE 0
+#define SKIP_TXFM_AC_DC 1
+#define SKIP_TXFM_AC_ONLY 2
int64_t bsse[MAX_MB_PLANE << 2];
diff --git a/vp10/encoder/blockiness.c b/vp10/encoder/blockiness.c
index ede13e0..c57e4ef 100644
--- a/vp10/encoder/blockiness.c
+++ b/vp10/encoder/blockiness.c
@@ -70,9 +70,9 @@
s_blockiness += horizontal_filter(s);
r_blockiness += horizontal_filter(r);
sum_0 += s[0];
- sum_sq_0 += s[0]*s[0];
+ sum_sq_0 += s[0] * s[0];
sum_1 += s[-1];
- sum_sq_1 += s[-1]*s[-1];
+ sum_sq_1 += s[-1] * s[-1];
}
var_0 = variance(sum_0, sum_sq_0, size);
var_1 = variance(sum_1, sum_sq_1, size);
@@ -120,19 +120,19 @@
// This function currently returns the blockiness for the entire frame by
// looking at all borders in steps of 4.
double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
- const unsigned char *img2, int img2_pitch,
- int width, int height ) {
+ const unsigned char *img2, int img2_pitch, int width,
+ int height) {
double blockiness = 0;
int i, j;
vpx_clear_system_state();
- for (i = 0; i < height; i += 4, img1 += img1_pitch * 4,
- img2 += img2_pitch * 4) {
+ for (i = 0; i < height;
+ i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) {
for (j = 0; j < width; j += 4) {
if (i > 0 && i < height && j > 0 && j < width) {
- blockiness += blockiness_vertical(img1 + j, img1_pitch,
- img2 + j, img2_pitch, 4);
- blockiness += blockiness_horizontal(img1 + j, img1_pitch,
- img2 + j, img2_pitch, 4);
+ blockiness +=
+ blockiness_vertical(img1 + j, img1_pitch, img2 + j, img2_pitch, 4);
+ blockiness += blockiness_horizontal(img1 + j, img1_pitch, img2 + j,
+ img2_pitch, 4);
}
}
}
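
Note: the hunk above accumulates pixel sums and sums of squares on each side of a border; the variance() helper they feed (defined earlier in blockiness.c, not shown in this patch) is, in spirit, the usual E[x^2] - E[x]^2. A minimal sketch under that assumption:

/* Sketch only -- the real helper's integer handling may differ. */
static double variance_of(double sum, double sum_sq, int size) {
  const double mean = sum / size;
  return sum_sq / size - mean * mean;
}
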
diff --git a/vp10/encoder/context_tree.c b/vp10/encoder/context_tree.c
index 6c056d2..07ae254 100644
--- a/vp10/encoder/context_tree.c
+++ b/vp10/encoder/context_tree.c
@@ -12,10 +12,7 @@
#include "vp10/encoder/encoder.h"
static const BLOCK_SIZE square[] = {
- BLOCK_8X8,
- BLOCK_16X16,
- BLOCK_32X32,
- BLOCK_64X64,
+ BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
};
static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
@@ -25,8 +22,7 @@
int i, k;
ctx->num_4x4_blk = num_blk;
- CHECK_MEM_ERROR(cm, ctx->zcoeff_blk,
- vpx_calloc(num_blk, sizeof(uint8_t)));
+ CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, vpx_calloc(num_blk, sizeof(uint8_t)));
for (i = 0; i < MAX_MB_PLANE; ++i) {
for (k = 0; k < 3; ++k) {
CHECK_MEM_ERROR(cm, ctx->coeff[i][k],
@@ -37,10 +33,10 @@
vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k])));
CHECK_MEM_ERROR(cm, ctx->eobs[i][k],
vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k])));
- ctx->coeff_pbuf[i][k] = ctx->coeff[i][k];
- ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k];
+ ctx->coeff_pbuf[i][k] = ctx->coeff[i][k];
+ ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k];
ctx->dqcoeff_pbuf[i][k] = ctx->dqcoeff[i][k];
- ctx->eobs_pbuf[i][k] = ctx->eobs[i][k];
+ ctx->eobs_pbuf[i][k] = ctx->eobs[i][k];
}
}
}
@@ -71,12 +67,12 @@
static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
int num_4x4_blk) {
alloc_mode_context(cm, num_4x4_blk, &tree->none);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[0]);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[0]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[0]);
if (num_4x4_blk > 4) {
- alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[1]);
- alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[1]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[1]);
+ alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[1]);
} else {
memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1]));
memset(&tree->vertical[1], 0, sizeof(tree->vertical[1]));
@@ -106,19 +102,18 @@
int nodes;
vpx_free(td->leaf_tree);
- CHECK_MEM_ERROR(cm, td->leaf_tree, vpx_calloc(leaf_nodes,
- sizeof(*td->leaf_tree)));
+ CHECK_MEM_ERROR(cm, td->leaf_tree,
+ vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree)));
vpx_free(td->pc_tree);
- CHECK_MEM_ERROR(cm, td->pc_tree, vpx_calloc(tree_nodes,
- sizeof(*td->pc_tree)));
+ CHECK_MEM_ERROR(cm, td->pc_tree,
+ vpx_calloc(tree_nodes, sizeof(*td->pc_tree)));
this_pc = &td->pc_tree[0];
this_leaf = &td->leaf_tree[0];
// 4x4 blocks smaller than 8x8 but in the same 8x8 block share the same
// context so we only need to allocate 1 for each 8x8 block.
- for (i = 0; i < leaf_nodes; ++i)
- alloc_mode_context(cm, 1, &td->leaf_tree[i]);
+ for (i = 0; i < leaf_nodes; ++i) alloc_mode_context(cm, 1, &td->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) {
@@ -126,8 +121,7 @@
tree->block_size = square[0];
alloc_tree_contexts(cm, tree, 4);
tree->leaf_split[0] = this_leaf++;
- for (j = 1; j < 4; j++)
- tree->leaf_split[j] = tree->leaf_split[0];
+ for (j = 1; j < 4; j++) tree->leaf_split[j] = tree->leaf_split[0];
}
// Each node has 4 leaf nodes, fill each block_size level of the tree
@@ -137,8 +131,7 @@
PC_TREE *const tree = &td->pc_tree[pc_tree_index];
alloc_tree_contexts(cm, tree, 4 << (2 * square_index));
tree->block_size = square[square_index];
- for (j = 0; j < 4; j++)
- tree->split[j] = this_pc++;
+ for (j = 0; j < 4; j++) tree->split[j] = this_pc++;
++pc_tree_index;
}
++square_index;
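
Shape note (illustrative; counts inferred from the loops above): sub-8x8 blocks alias a single leaf context per 8x8 -- that is why leaf_split[1..3] all point at leaf_split[0] above -- so a 64x64 superblock needs 64 leaf contexts, and the PC_TREE array holds one node per square block, 64 + 16 + 4 + 1 == 85 from 8x8 up to 64x64.
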
@@ -152,12 +145,10 @@
int i;
// Set up all 4x4 mode contexts
- for (i = 0; i < 64; ++i)
- free_mode_context(&td->leaf_tree[i]);
+ for (i = 0; i < 64; ++i) free_mode_context(&td->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
- for (i = 0; i < tree_nodes; ++i)
- free_tree_contexts(&td->pc_tree[i]);
+ for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]);
vpx_free(td->pc_tree);
td->pc_tree = NULL;
diff --git a/vp10/encoder/cost.c b/vp10/encoder/cost.c
index aab8263..a4cd43a 100644
--- a/vp10/encoder/cost.c
+++ b/vp10/encoder/cost.c
@@ -12,31 +12,30 @@
#include "vp10/encoder/cost.h"
const unsigned int vp10_prob_cost[256] = {
- 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161,
- 1129, 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889,
- 873, 858, 843, 829, 816, 803, 790, 778, 767, 755, 744, 733,
- 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
- 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541,
- 534, 528, 522, 516, 511, 505, 499, 494, 488, 483, 477, 472,
- 467, 462, 457, 452, 447, 442, 437, 433, 428, 424, 419, 415,
- 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365,
- 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321,
- 317, 314, 311, 307, 304, 301, 297, 294, 291, 288, 285, 281,
- 278, 275, 272, 269, 266, 263, 260, 257, 255, 252, 249, 246,
- 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214,
- 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184,
- 181, 179, 177, 174, 172, 170, 168, 165, 163, 161, 159, 156,
- 154, 152, 150, 148, 145, 143, 141, 139, 137, 135, 133, 131,
- 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107,
- 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84,
- 82, 81, 79, 77, 75, 73, 72, 70, 68, 66, 65, 63,
- 61, 60, 58, 56, 55, 53, 51, 50, 48, 46, 45, 43,
- 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24,
- 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6,
- 4, 3, 1, 1};
+ 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129,
+ 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858,
+ 843, 829, 816, 803, 790, 778, 767, 755, 744, 733, 723, 713, 703,
+ 693, 684, 675, 666, 657, 649, 641, 633, 625, 617, 609, 602, 594,
+ 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516, 511,
+ 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442,
+ 437, 433, 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385,
+ 381, 377, 373, 369, 365, 361, 357, 353, 349, 346, 342, 338, 335,
+ 331, 328, 324, 321, 317, 314, 311, 307, 304, 301, 297, 294, 291,
+ 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257, 255, 252,
+ 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216,
+ 214, 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184,
+ 181, 179, 177, 174, 172, 170, 168, 165, 163, 161, 159, 156, 154,
+ 152, 150, 148, 145, 143, 141, 139, 137, 135, 133, 131, 129, 127,
+ 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, 105, 103, 101,
+ 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77,
+ 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55,
+ 53, 51, 50, 48, 46, 45, 43, 41, 40, 38, 37, 35, 33,
+ 32, 30, 29, 27, 25, 24, 22, 21, 19, 18, 16, 15, 13,
+ 12, 10, 9, 7, 6, 4, 3, 1, 1
+};
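
Note: to within a unit or two of rounding and end clamping, the table above is cost(p) = -256 * log2(p / 256) -- the cost, in 1/256-bit units, of coding a symbol of probability p/256 (so p == 128 costs about one bit, p == 16 about four). A sketch that reproduces it under that assumption:

#include <math.h>

/* Illustrative only; p in [1, 255]. approx_prob_cost(128) ~ 256 and
 * approx_prob_cost(16) ~ 1024, matching the table to within rounding. */
static int approx_prob_cost(int p) {
  return (int)(-256.0 * log2(p / 256.0) + 0.5);
}
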
-static void cost(int *costs, vpx_tree tree, const vpx_prob *probs,
- int i, int c) {
+static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i,
+ int c) {
const vpx_prob prob = probs[i / 2];
int b;
diff --git a/vp10/encoder/cost.h b/vp10/encoder/cost.h
index b9619c6..702f0a4 100644
--- a/vp10/encoder/cost.h
+++ b/vp10/encoder/cost.h
@@ -23,16 +23,16 @@
#define vp10_cost_one(prob) vp10_cost_zero(vpx_complement(prob))
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? vpx_complement(prob) \
- : (prob))
+#define vp10_cost_bit(prob, bit) \
+ vp10_cost_zero((bit) ? vpx_complement(prob) : (prob))
static INLINE unsigned int cost_branch256(const unsigned int ct[2],
vpx_prob p) {
return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
}
-static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs,
- int bits, int len) {
+static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits,
+ int len) {
int cost = 0;
vpx_tree_index i = 0;
diff --git a/vp10/encoder/dct.c b/vp10/encoder/dct.c
index 6fb7870..2a7ba7e 100644
--- a/vp10/encoder/dct.c
+++ b/vp10/encoder/dct.c
@@ -165,11 +165,11 @@
input[3] = in[3] + in[12];
input[4] = in[4] + in[11];
input[5] = in[5] + in[10];
- input[6] = in[6] + in[ 9];
- input[7] = in[7] + in[ 8];
+ input[6] = in[6] + in[9];
+ input[7] = in[7] + in[8];
- step1[0] = in[7] - in[ 8];
- step1[1] = in[6] - in[ 9];
+ step1[0] = in[7] - in[8];
+ step1[1] = in[6] - in[9];
step1[2] = in[5] - in[10];
step1[3] = in[4] - in[11];
step1[4] = in[3] - in[12];
@@ -292,7 +292,6 @@
out[15] = (tran_low_t)fdct_round_shift(temp2);
}
-
/* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32
static void fdct32(const tran_low_t *input, tran_low_t *output) {
tran_high_t temp;
@@ -746,14 +745,14 @@
tran_high_t x7 = input[6];
// stage 1
- s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
- s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
+ s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
+ s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
- s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
- s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+ s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
+ s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
x0 = fdct_round_shift(s0 + s4);
x1 = fdct_round_shift(s1 + s5);
@@ -769,10 +768,10 @@
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
- s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
- s6 = - cospi_24_64 * x6 + cospi_8_64 * x7;
- s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+ s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
+ s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
+ s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
+ s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
x0 = s0 + s2;
x1 = s1 + s3;
@@ -826,11 +825,11 @@
tran_high_t x15 = input[14];
// stage 1
- s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
+ s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
- s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
+ s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
- s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
+ s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
@@ -839,9 +838,9 @@
s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
- s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
+ s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
- s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+ s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
x0 = fdct_round_shift(s0 + s8);
x1 = fdct_round_shift(s1 + s9);
@@ -851,8 +850,8 @@
x5 = fdct_round_shift(s5 + s13);
x6 = fdct_round_shift(s6 + s14);
x7 = fdct_round_shift(s7 + s15);
- x8 = fdct_round_shift(s0 - s8);
- x9 = fdct_round_shift(s1 - s9);
+ x8 = fdct_round_shift(s0 - s8);
+ x9 = fdct_round_shift(s1 - s9);
x10 = fdct_round_shift(s2 - s10);
x11 = fdct_round_shift(s3 - s11);
x12 = fdct_round_shift(s4 - s12);
@@ -869,14 +868,14 @@
s5 = x5;
s6 = x6;
s7 = x7;
- s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
- s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
- s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
- s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
- s12 = - x12 * cospi_28_64 + x13 * cospi_4_64;
- s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
- s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;
- s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+ s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
+ s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
+ s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
+ s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
+ s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
+ s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
+ s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
+ s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
x0 = s0 + s4;
x1 = s1 + s5;
@@ -900,18 +899,18 @@
s1 = x1;
s2 = x2;
s3 = x3;
- s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
+ s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
- s6 = - x6 * cospi_24_64 + x7 * cospi_8_64;
- s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+ s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
+ s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
s8 = x8;
s9 = x9;
s10 = x10;
s11 = x11;
- s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
+ s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
- s14 = - x14 * cospi_24_64 + x15 * cospi_8_64;
- s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+ s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
+ s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
x0 = s0 + s2;
x1 = s1 + s3;
@@ -931,13 +930,13 @@
x15 = fdct_round_shift(s13 - s15);
// stage 4
- s2 = (- cospi_16_64) * (x2 + x3);
+ s2 = (-cospi_16_64) * (x2 + x3);
s3 = cospi_16_64 * (x2 - x3);
s6 = cospi_16_64 * (x6 + x7);
- s7 = cospi_16_64 * (- x6 + x7);
+ s7 = cospi_16_64 * (-x6 + x7);
s10 = cospi_16_64 * (x10 + x11);
- s11 = cospi_16_64 * (- x10 + x11);
- s14 = (- cospi_16_64) * (x14 + x15);
+ s11 = cospi_16_64 * (-x10 + x11);
+ s14 = (-cospi_16_64) * (x14 + x15);
s15 = cospi_16_64 * (x14 - x15);
x2 = fdct_round_shift(s2);
@@ -968,28 +967,28 @@
}
static const transform_2d FHT_4[] = {
- { fdct4, fdct4 }, // DCT_DCT = 0
- { fadst4, fdct4 }, // ADST_DCT = 1
- { fdct4, fadst4 }, // DCT_ADST = 2
- { fadst4, fadst4 } // ADST_ADST = 3
+ { fdct4, fdct4 }, // DCT_DCT = 0
+ { fadst4, fdct4 }, // ADST_DCT = 1
+ { fdct4, fadst4 }, // DCT_ADST = 2
+ { fadst4, fadst4 } // ADST_ADST = 3
};
static const transform_2d FHT_8[] = {
- { fdct8, fdct8 }, // DCT_DCT = 0
- { fadst8, fdct8 }, // ADST_DCT = 1
- { fdct8, fadst8 }, // DCT_ADST = 2
- { fadst8, fadst8 } // ADST_ADST = 3
+ { fdct8, fdct8 }, // DCT_DCT = 0
+ { fadst8, fdct8 }, // ADST_DCT = 1
+ { fdct8, fadst8 }, // DCT_ADST = 2
+ { fadst8, fadst8 } // ADST_ADST = 3
};
static const transform_2d FHT_16[] = {
- { fdct16, fdct16 }, // DCT_DCT = 0
- { fadst16, fdct16 }, // ADST_DCT = 1
- { fdct16, fadst16 }, // DCT_ADST = 2
- { fadst16, fadst16 } // ADST_ADST = 3
+ { fdct16, fdct16 }, // DCT_DCT = 0
+ { fadst16, fdct16 }, // ADST_DCT = 1
+ { fdct16, fadst16 }, // DCT_ADST = 2
+ { fadst16, fadst16 } // ADST_ADST = 3
};
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct4x4_c(input, output, stride);
} else {
@@ -1000,36 +999,29 @@
// Columns
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = input[j * stride + i] * 16;
- if (i == 0 && temp_in[0])
- temp_in[0] += 1;
+ for (j = 0; j < 4; ++j) temp_in[j] = input[j * stride + i] * 16;
+ if (i == 0 && temp_in[0]) temp_in[0] += 1;
ht.cols(temp_in, temp_out);
- for (j = 0; j < 4; ++j)
- out[j * 4 + i] = temp_out[j];
+ for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
}
// Rows
for (i = 0; i < 4; ++i) {
- for (j = 0; j < 4; ++j)
- temp_in[j] = out[j + i * 4];
+ for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
ht.rows(temp_in, temp_out);
- for (j = 0; j < 4; ++j)
- output[j + i * 4] = (temp_out[j] + 1) >> 2;
+ for (j = 0; j < 4; ++j) output[j + i * 4] = (temp_out[j] + 1) >> 2;
}
}
}
void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
- tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ tran_low_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int eob = -1;
int i, j;
@@ -1061,8 +1053,8 @@
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
- t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
- t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
+ t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
+ t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
output[0 * 8] = (tran_low_t)fdct_round_shift(t0);
output[2 * 8] = (tran_low_t)fdct_round_shift(t2);
output[4 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -1081,10 +1073,10 @@
x3 = s7 + t3;
// stage 4
- t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
- t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
+ t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
+ t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
- t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
+ t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
output[1 * 8] = (tran_low_t)fdct_round_shift(t0);
output[3 * 8] = (tran_low_t)fdct_round_shift(t2);
output[5 * 8] = (tran_low_t)fdct_round_shift(t1);
@@ -1097,8 +1089,7 @@
// Rows
for (i = 0; i < 8; ++i) {
fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]);
- for (j = 0; j < 8; ++j)
- coeff_ptr[j + i * 8] /= 2;
+ for (j = 0; j < 8; ++j) coeff_ptr[j + i * 8] /= 2;
}
// TODO(jingning) Decide the need of these arguments after the
@@ -1125,15 +1116,14 @@
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
}
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct8x8_c(input, output, stride);
} else {
@@ -1144,17 +1134,14 @@
// Columns
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = input[j * stride + i] * 4;
+ for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
- for (j = 0; j < 8; ++j)
- out[j * 8 + i] = temp_out[j];
+ for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
}
// Rows
for (i = 0; i < 8; ++i) {
- for (j = 0; j < 8; ++j)
- temp_in[j] = out[j + i * 8];
+ for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
ht.rows(temp_in, temp_out);
for (j = 0; j < 8; ++j)
output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -1218,8 +1205,8 @@
}
}
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
if (tx_type == DCT_DCT) {
vpx_fdct16x16_c(input, output, stride);
} else {
@@ -1230,8 +1217,7 @@
// Columns
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = input[j * stride + i] * 4;
+ for (j = 0; j < 16; ++j) temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
for (j = 0; j < 16; ++j)
out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
@@ -1239,33 +1225,31 @@
// Rows
for (i = 0; i < 16; ++i) {
- for (j = 0; j < 16; ++j)
- temp_in[j] = out[j + i * 16];
+ for (j = 0; j < 16; ++j) temp_in[j] = out[j + i * 16];
ht.rows(temp_in, temp_out);
- for (j = 0; j < 16; ++j)
- output[j + i * 16] = temp_out[j];
+ for (j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j];
}
}
}
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
vp10_fht4x4_c(input, output, stride, tx_type);
}
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
vp10_fht8x8_c(input, output, stride, tx_type);
}
void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
- int stride) {
+ int stride) {
vp10_fwht4x4_c(input, output, stride);
}
void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+ int stride, int tx_type) {
vp10_fht16x16_c(input, output, stride, tx_type);
}
#endif // CONFIG_VPX_HIGHBITDEPTH
diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c
index 99c8d79..f9fa104 100644
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -46,90 +46,86 @@
#include "vp10/encoder/segmentation.h"
#include "vp10/encoder/tokenize.h"
-static void encode_superblock(VP10_COMP *cpi, ThreadData * td,
- TOKENEXTRA **t, int output_enabled,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx);
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
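
Note: the trick behind this flat all-128 reference is that variance is shift-invariant -- Var(x - c) == Var(x) -- so running the variance kernel against a constant block measures the source block's own variation. A conceptual sketch (the library kernels return an unnormalised value that vp10_get_sby_perpixel_variance below scales per pixel):

#include <stdint.h>

/* Per-pixel variance of src against a constant 128 block; sketch only,
 * assumes n > 0. */
static unsigned int var_vs_flat(const uint8_t *src, int n) {
  long long sum = 0, sum_sq = 0;
  int i;
  for (i = 0; i < n; ++i) {
    const int d = src[i] - 128; /* difference against the flat reference */
    sum += d;
    sum_sq += (long long)d * d;
  }
  /* E[d^2] - E[d]^2 == Var(src), since the constant only shifts the mean. */
  return (unsigned int)((sum_sq - sum * sum / n) / n);
}
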
#if CONFIG_VPX_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
+ 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
};
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
- 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
+ 128 * 16
};
#endif // CONFIG_VPX_HIGHBITDEPTH
unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs) {
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs) {
unsigned int sse;
- const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- VP9_VAR_OFFS, 0, &sse);
+ const unsigned int var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse);
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
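
Note: the return above divides the accumulated variance by the pixel count with rounding; a sketch assuming the usual ROUND_POWER_OF_TWO definition and num_pels_log2_lookup[BLOCK_64X64] == 12 (64 * 64 == 4096 pixels):

/* Sketch of the rounding divide, under the stated assumptions. */
#define ROUND_POWER_OF_TWO_SKETCH(v, n) (((v) + (1 << ((n)-1))) >> (n))
/* e.g. a raw variance of 409600 over a 64x64 block gives
 * ROUND_POWER_OF_TWO_SKETCH(409600, 12) == 100 per pixel. */
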
#if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(
- VP10_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
+unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs, int bd) {
unsigned int var, sse;
switch (bd) {
case 10:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse);
break;
case 12:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse);
break;
case 8:
default:
- var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
- CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
- 0, &sse);
+ var =
+ cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
+ CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse);
break;
}
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
@@ -152,12 +148,10 @@
}
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
- MACROBLOCK *x,
- int mi_row,
+ MACROBLOCK *x, int mi_row,
int mi_col) {
- unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
- mi_row, mi_col,
- BLOCK_64X64);
+ unsigned int var = get_sby_perpixel_diff_variance(
+ cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
if (var < 8)
return BLOCK_64X64;
else if (var < 128)
@@ -172,8 +166,7 @@
// pointers.
static INLINE void set_mode_info_offsets(VP10_COMP *const cpi,
MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- int mi_row,
+ MACROBLOCKD *const xd, int mi_row,
int mi_col) {
VP10_COMMON *const cm = &cpi->common;
const int idx_str = xd->mi_stride * mi_row + mi_col;
@@ -210,8 +203,8 @@
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
- set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
- cm->mi_rows, cm->mi_cols);
+ set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+ cm->mi_cols);
// Set up source buffers.
vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
@@ -223,8 +216,8 @@
// Setup segment ID.
if (seg->enabled) {
if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
vp10_init_plane_quantizers(cpi, x);
@@ -239,10 +232,8 @@
xd->tile = *tile;
}
-static void set_block_size(VP10_COMP * const cpi,
- MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col,
+static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+ MACROBLOCKD *const xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
set_mode_info_offsets(cpi, x, xd, mi_row, mi_col);
@@ -304,38 +295,37 @@
node->part_variances = NULL;
switch (bsize) {
case BLOCK_64X64: {
- v64x64 *vt = (v64x64 *) data;
+ v64x64 *vt = (v64x64 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_32X32: {
- v32x32 *vt = (v32x32 *) data;
+ v32x32 *vt = (v32x32 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_16X16: {
- v16x16 *vt = (v16x16 *) data;
+ v16x16 *vt = (v16x16 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_8X8: {
- v8x8 *vt = (v8x8 *) data;
+ v8x8 *vt = (v8x8 *)data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_4X4: {
- v4x4 *vt = (v4x4 *) data;
+ v4x4 *vt = (v4x4 *)data;
node->part_variances = &vt->part_variances;
- for (i = 0; i < 4; i++)
- node->split[i] = &vt->split[i];
+ for (i = 0; i < 4; i++) node->split[i] = &vt->split[i];
break;
}
default: {
@@ -353,8 +343,10 @@
}
static void get_variance(var *v) {
- v->variance = (int)(256 * (v->sum_square_error -
- ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
+ v->variance =
+ (int)(256 * (v->sum_square_error -
+ ((v->sum_error * v->sum_error) >> v->log2_count)) >>
+ v->log2_count);
}
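
Worked check of the shift form above (illustrative): with n == 1 << log2_count it computes 256 * (sse - sum^2 / n) / n, i.e. 256x the population variance. For log2_count == 6 (n == 64), sum == 640, sse == 12800: sum * sum >> 6 == 6400, and 256 * (12800 - 6400) >> 6 == 25600 == 256 * 100, matching a mean of 10 and a variance of 100.
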
static void sum_2_variances(const var *a, const var *b, var *r) {
@@ -375,17 +367,12 @@
&node.part_variances->none);
}
-static int set_vt_partitioning(VP10_COMP *cpi,
- MACROBLOCK *const x,
- MACROBLOCKD *const xd,
- void *data,
- BLOCK_SIZE bsize,
- int mi_row,
- int mi_col,
- int64_t threshold,
- BLOCK_SIZE bsize_min,
+static int set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+ MACROBLOCKD *const xd, void *data,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int64_t threshold, BLOCK_SIZE bsize_min,
int force_split) {
- VP10_COMMON * const cm = &cpi->common;
+ VP10_COMMON *const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
@@ -394,8 +381,7 @@
assert(block_height == block_width);
tree_to_node(data, bsize, &vt);
- if (force_split == 1)
- return 0;
+ if (force_split == 1) return 0;
// For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
// variance is below threshold, otherwise split will be selected.
@@ -418,7 +404,7 @@
// For key frame: take split for bsize above 32X32 or very high variance.
if (cm->frame_type == KEY_FRAME &&
(bsize > BLOCK_32X32 ||
- vt.part_variances->none.variance > (threshold << 4))) {
+ vt.part_variances->none.variance > (threshold << 4))) {
return 0;
}
// If variance is low, take the bsize (no split).
@@ -469,8 +455,8 @@
VP10_COMMON *const cm = &cpi->common;
const int is_key_frame = (cm->frame_type == KEY_FRAME);
const int threshold_multiplier = is_key_frame ? 20 : 1;
- const int64_t threshold_base = (int64_t)(threshold_multiplier *
- cpi->y_dequant[q][1]);
+ const int64_t threshold_base =
+ (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]);
if (is_key_frame) {
thresholds[0] = threshold_base;
thresholds[1] = threshold_base >> 2;
@@ -508,8 +494,9 @@
if (cm->width <= 352 && cm->height <= 288)
cpi->vbp_threshold_sad = 100;
else
- cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
- (cpi->y_dequant[q][1] << 1) : 1000;
+ cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000
+ ? (cpi->y_dequant[q][1] << 1)
+ : 1000;
cpi->vbp_bsize_min = BLOCK_16X16;
}
cpi->vbp_threshold_minmax = 15 + (q >> 3);
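
Worked example of the visible pieces (illustrative): with cpi->y_dequant[q][1] == 80, threshold_base is 80 on an inter frame and 20 * 80 == 1600 on a key frame (so thresholds[1] == 1600 >> 2 == 400); for a frame larger than 352x288, vbp_threshold_sad == VPXMAX(2 * 80, 1000) == 1000; and q == 80 gives vbp_threshold_minmax == 15 + (80 >> 3) == 25.
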
@@ -522,8 +509,7 @@
#if CONFIG_VPX_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high) {
+ int pixels_wide, int pixels_high) {
int k;
int minmax_max = 0;
int minmax_min = 255;
@@ -537,22 +523,17 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
- &min, &max);
+ d + y8_idx * dp + x8_idx, dp, &min, &max);
} else {
- vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
- &min, &max);
+ vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx,
+ dp, &min, &max);
}
#else
- vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
- d + y8_idx * dp + x8_idx, dp,
+ vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp,
&min, &max);
#endif
- if ((max - min) > minmax_max)
- minmax_max = (max - min);
- if ((max - min) < minmax_min)
- minmax_min = (max - min);
+ if ((max - min) > minmax_max) minmax_max = (max - min);
+ if ((max - min) < minmax_min) minmax_min = (max - min);
}
}
return (minmax_max - minmax_min);
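
Illustration: the loop above tracks the largest and smallest per-8x8 (max - min) pixel ranges inside a 16x16 area and returns their spread, which is large exactly when one sub-block is flat and another is busy -- a cheap cue that the block straddles an edge. E.g. ranges {4, 6, 90, 5} give a spread of 90 - 4 == 86, well above a vbp_threshold_minmax of 15 + (q >> 3).
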
@@ -563,8 +544,7 @@
#if CONFIG_VPX_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high,
+ int pixels_wide, int pixels_high,
int is_key_frame) {
int k;
for (k = 0; k < 4; k++) {
@@ -582,13 +562,11 @@
d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
} else {
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
}
#else
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
sum = s_avg - d_avg;
sse = sum * sum;
@@ -602,8 +580,7 @@
#if CONFIG_VPX_HIGHBITDEPTH
int highbd_flag,
#endif
- int pixels_wide,
- int pixels_high,
+ int pixels_wide, int pixels_high,
int is_key_frame) {
int k;
for (k = 0; k < 4; k++) {
@@ -621,13 +598,11 @@
d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
} else {
s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
}
#else
s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- if (!is_key_frame)
- d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
sum = s_avg - d_avg;
sse = sum * sum;
@@ -638,11 +613,9 @@
// This function chooses partitioning based on the variance between the
// source and the last reconstructed frame, computed on down-sampled inputs.
-static int choose_partitioning(VP10_COMP *cpi,
- const TileInfo *const tile,
- MACROBLOCK *x,
- int mi_row, int mi_col) {
- VP10_COMMON * const cm = &cpi->common;
+static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+ MACROBLOCK *x, int mi_row, int mi_col) {
+ VP10_COMMON *const cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
int i, j, k, m;
v64x64 vt;
@@ -653,8 +626,8 @@
int sp;
int dp;
int pixels_wide = 64, pixels_high = 64;
- int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
- cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
+ int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
+ cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] };
// Always use 4x4 partition for key frame.
const int is_key_frame = (cm->frame_type == KEY_FRAME);
@@ -664,8 +637,8 @@
int segment_id = CR_SEGMENT_ID_BASE;
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
- cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
if (cyclic_refresh_segment_id_boosted(segment_id)) {
@@ -676,10 +649,8 @@
set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
- if (xd->mb_to_right_edge < 0)
- pixels_wide += (xd->mb_to_right_edge >> 3);
- if (xd->mb_to_bottom_edge < 0)
- pixels_high += (xd->mb_to_bottom_edge >> 3);
+ if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3);
+ if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
@@ -691,25 +662,24 @@
const YV12_BUFFER_CONFIG *yv12_g = NULL;
unsigned int y_sad, y_sad_g;
- const BLOCK_SIZE bsize = BLOCK_32X32
- + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
+ const BLOCK_SIZE bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 +
+ (mi_row + 4 < cm->mi_rows);
assert(yv12 != NULL);
yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
if (yv12_g && yv12_g != yv12) {
vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
- &cm->frame_refs[GOLDEN_FRAME - 1].sf);
- y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
- x->plane[0].src.stride,
- xd->plane[0].pre[0].buf,
- xd->plane[0].pre[0].stride);
+ &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+ y_sad_g = cpi->fn_ptr[bsize].sdf(
+ x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
+ xd->plane[0].pre[0].stride);
} else {
y_sad_g = UINT_MAX;
}
vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
- &cm->frame_refs[LAST_FRAME - 1].sf);
+ &cm->frame_refs[LAST_FRAME - 1].sf);
mbmi->ref_frame[0] = LAST_FRAME;
mbmi->ref_frame[1] = NONE;
mbmi->sb_type = BLOCK_64X64;
@@ -719,7 +689,7 @@
y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
if (y_sad_g < y_sad) {
vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
- &cm->frame_refs[GOLDEN_FRAME - 1].sf);
+ &cm->frame_refs[GOLDEN_FRAME - 1].sf);
mbmi->ref_frame[0] = GOLDEN_FRAME;
mbmi->mv[0].as_int = 0;
y_sad = y_sad_g;
@@ -730,15 +700,15 @@
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
for (i = 1; i <= 2; ++i) {
- struct macroblock_plane *p = &x->plane[i];
+ struct macroblock_plane *p = &x->plane[i];
struct macroblockd_plane *pd = &xd->plane[i];
const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
if (bs == BLOCK_INVALID)
uv_sad = UINT_MAX;
else
- uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
- pd->dst.buf, pd->dst.stride);
+ uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf,
+ pd->dst.stride);
x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
}
@@ -748,8 +718,7 @@
// If the y_sad is very small, take 64x64 as partition and exit.
// Don't check on boosted segment for now, as 64x64 is suppressed there.
- if (segment_id == CR_SEGMENT_ID_BASE &&
- y_sad < cpi->vbp_threshold_sad) {
+ if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) {
const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
if (mi_col + block_width / 2 < cm->mi_cols &&
@@ -764,16 +733,10 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (xd->bd) {
- case 10:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
- break;
- case 12:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
- break;
+ case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
+ case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break;
case 8:
- default:
- d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
- break;
+ default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
}
}
#endif // CONFIG_VPX_HIGHBITDEPTH
@@ -799,22 +762,19 @@
if (!is_key_frame) {
fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VPX_HIGHBITDEPTH
- xd->cur_buf->flags,
+ xd->cur_buf->flags,
#endif
- pixels_wide,
- pixels_high,
- is_key_frame);
+ pixels_wide, pixels_high, is_key_frame);
fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
get_variance(&vt.split[i].split[j].part_variances.none);
- if (vt.split[i].split[j].part_variances.none.variance >
- thresholds[2]) {
+ if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) {
// 16X16 variance is above threshold for split, so force split to 8x8
// for this 16x16 block (this also forces splits for upper levels).
force_split[split_index] = 1;
force_split[i + 1] = 1;
force_split[0] = 1;
} else if (vt.split[i].split[j].part_variances.none.variance >
- thresholds[1] &&
+ thresholds[1] &&
!cyclic_refresh_segment_id_boosted(segment_id)) {
// We have some nominal amount of 16x16 variance (based on average),
// compute the minmax over the 8x8 sub-blocks, and if above threshold,
@@ -832,23 +792,20 @@
}
}
if (is_key_frame || (low_res &&
- vt.split[i].split[j].part_variances.none.variance >
- (thresholds[1] << 1))) {
+ vt.split[i].split[j].part_variances.none.variance >
+ (thresholds[1] << 1))) {
force_split[split_index] = 0;
// Go down to 4x4 down-sampling for variance.
variance4x4downsample[i2 + j] = 1;
for (k = 0; k < 4; k++) {
int x8_idx = x16_idx + ((k & 1) << 3);
int y8_idx = y16_idx + ((k >> 1) << 3);
- v8x8 *vst2 = is_key_frame ? &vst->split[k] :
- &vt2[i2 + j].split[k];
+ v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VPX_HIGHBITDEPTH
xd->cur_buf->flags,
#endif
- pixels_wide,
- pixels_high,
- is_key_frame);
+ pixels_wide, pixels_high, is_key_frame);
}
}
}
@@ -859,10 +816,8 @@
const int i2 = i << 2;
for (j = 0; j < 4; j++) {
if (variance4x4downsample[i2 + j] == 1) {
- v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
- &vt.split[i].split[j];
- for (m = 0; m < 4; m++)
- fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
+ v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j];
+ for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
fill_variance_tree(vtemp, BLOCK_16X16);
}
}
@@ -884,7 +839,7 @@
// Now go through the entire structure, splitting every block size until
// we reach one whose variance is below the threshold.
- if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
+ if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
thresholds[0], BLOCK_16X16, force_split[0])) {
for (i = 0; i < 4; ++i) {
@@ -901,15 +856,13 @@
// For inter frames: if variance4x4downsample[] == 1 for this 16x16
// block, then the variance is based on 4x4 down-sampling, so use vt2
// in set_vt_partitioning(), otherwise use vt.
- v16x16 *vtemp = (!is_key_frame &&
- variance4x4downsample[i2 + j] == 1) ?
- &vt2[i2 + j] : &vt.split[i].split[j];
- if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
- mi_row + y32_idx + y16_idx,
- mi_col + x32_idx + x16_idx,
- thresholds[2],
- cpi->vbp_bsize_min,
- force_split[5 + i2 + j])) {
+ v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1)
+ ? &vt2[i2 + j]
+ : &vt.split[i].split[j];
+ if (!set_vt_partitioning(
+ cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx,
+ mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min,
+ force_split[5 + i2 + j])) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
@@ -919,16 +872,14 @@
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
thresholds[3], BLOCK_8X8, 0)) {
- set_block_size(cpi, x, xd,
- (mi_row + y32_idx + y16_idx + y8_idx),
- (mi_col + x32_idx + x16_idx + x8_idx),
- BLOCK_4X4);
+ set_block_size(
+ cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4);
}
} else {
- set_block_size(cpi, x, xd,
- (mi_row + y32_idx + y16_idx + y8_idx),
- (mi_col + x32_idx + x16_idx + x8_idx),
- BLOCK_8X8);
+ set_block_size(
+ cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8);
}
}
}
@@ -939,8 +890,7 @@
return 0;
}
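
Flow sketch of the cascade above (illustrative; the 32x32 level is partly elided from this hunk): each level calls set_vt_partitioning() with its own threshold and recurses on failure --
  64x64 vs thresholds[0] -> else try the four 32x32 children,
  32x32 vs thresholds[1] -> else the four 16x16 children,
  16x16 vs thresholds[2] -> else the four 8x8 children,
  8x8   vs thresholds[3] -> else fall back to 4x4 --
with force_split[] entries, set while the variance tree was filled, vetoing the "no split" outcome at their level.
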
-static void update_state(VP10_COMP *cpi, ThreadData *td,
- PICK_MODE_CONTEXT *ctx,
+static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, BLOCK_SIZE bsize,
int output_enabled) {
int i, x_idx, y;
@@ -958,8 +908,7 @@
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
- MV_REF *const frame_mvs =
- cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
const int mis = cm->mi_stride;
@@ -976,17 +925,15 @@
if (seg->enabled) {
// For in frame complexity AQ copy the segment id from the segment map.
if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
- mi_addr->mbmi.segment_id =
- get_segment_id(cm, map, bsize, mi_row, mi_col);
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
+ mi_addr->mbmi.segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row,
- mi_col, bsize, ctx->rate, ctx->dist,
- x->skip);
+ vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+ bsize, ctx->rate, ctx->dist, x->skip);
}
}
@@ -1005,20 +952,18 @@
p[i].eobs = ctx->eobs_pbuf[i][2];
}
- for (i = 0; i < 2; ++i)
- pd[i].color_index_map = ctx->color_index_map[i];
+ for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
// Restore the coding context of the MB to that which was in place
// when the mode was picked for it
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
- if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
- && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx &&
+ (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
xd->mi[x_idx + y * mis] = mi_addr;
}
- if (cpi->oxcf.aq_mode)
- vp10_init_plane_quantizers(cpi, x);
+ if (cpi->oxcf.aq_mode) vp10_init_plane_quantizers(cpi, x);
if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -1029,22 +974,16 @@
memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
- if (!output_enabled)
- return;
+ if (!output_enabled) return;
#if CONFIG_INTERNAL_STATS
if (frame_is_intra_only(cm)) {
static const int kf_mode_index[] = {
- THR_DC /*DC_PRED*/,
- THR_V_PRED /*V_PRED*/,
- THR_H_PRED /*H_PRED*/,
- THR_D45_PRED /*D45_PRED*/,
- THR_D135_PRED /*D135_PRED*/,
- THR_D117_PRED /*D117_PRED*/,
- THR_D153_PRED /*D153_PRED*/,
- THR_D207_PRED /*D207_PRED*/,
- THR_D63_PRED /*D63_PRED*/,
- THR_TM /*TM_PRED*/,
+ THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/,
+ THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/,
+ THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+ THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+ THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/,
};
++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
} else {
@@ -1083,9 +1022,9 @@
}
void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col) {
- uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
- const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
+ int mi_row, int mi_col) {
+ uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
+ const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
int i;
// Set current frame pointer.
@@ -1097,24 +1036,20 @@
x->e_mbd.plane[i].subsampling_y);
}
-static int set_segment_rdmult(VP10_COMP *const cpi,
- MACROBLOCK *const x,
- int8_t segment_id) {
+static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+ int8_t segment_id) {
int segment_qindex;
VP10_COMMON *const cm = &cpi->common;
vp10_init_plane_quantizers(cpi, x);
vpx_clear_system_state();
- segment_qindex = vp10_get_qindex(&cm->seg, segment_id,
- cm->base_qindex);
+ segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
}
-static void rd_pick_sb_modes(VP10_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *const x,
- int mi_row, int mi_col, RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd) {
+static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *const x, int mi_row, int mi_col,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
VP10_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -1140,8 +1075,7 @@
p[i].eobs = ctx->eobs_pbuf[i][0];
}
- for (i = 0; i < 2; ++i)
- pd[i].color_index_map = ctx->color_index_map[i];
+ for (i = 0; i < 2; ++i) pd[i].color_index_map = ctx->color_index_map[i];
ctx->is_coded = 0;
ctx->skippable = 0;
@@ -1153,39 +1087,37 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- x->source_variance =
- vp10_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
- bsize, xd->bd);
+ x->source_variance = vp10_high_get_sby_perpixel_variance(
+ cpi, &x->plane[0].src, bsize, xd->bd);
} else {
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
}
#else
x->source_variance =
- vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+ vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
#endif // CONFIG_VPX_HIGHBITDEPTH
// Save rdmult before it might be changed, so it can be restored later.
orig_rdmult = x->rdmult;
if (aq_mode == VARIANCE_AQ) {
- const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
- : vp10_block_energy(cpi, x, bsize);
- if (cm->frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame ||
+ const int energy =
+ bsize <= BLOCK_16X16 ? x->mb_energy : vp10_block_energy(cpi, x, bsize);
+ if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
mbmi->segment_id = vp10_vaq_segment_id(energy);
} else {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
}
x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
} else if (aq_mode == COMPLEXITY_AQ) {
x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
} else if (aq_mode == CYCLIC_REFRESH_AQ) {
- const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
// If segment is boosted, use rdmult for that segment.
if (cyclic_refresh_segment_id_boosted(
get_segment_id(cm, map, bsize, mi_row, mi_col)))
@@ -1200,22 +1132,20 @@
if (bsize >= BLOCK_8X8) {
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
- ctx, best_rd);
+ ctx, best_rd);
else
- vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx, best_rd);
+ vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ bsize, ctx, best_rd);
} else {
- vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
- rd_cost, bsize, ctx, best_rd);
+ vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+ bsize, ctx, best_rd);
}
}
-
// Examine the resulting rate and for AQ mode 2 make a segment choice.
- if ((rd_cost->rate != INT_MAX) &&
- (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
- (cm->frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame ||
+ if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) &&
+ (bsize >= BLOCK_16X16) &&
+ (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
}
@@ -1224,8 +1154,7 @@
// TODO(jingning) The rate-distortion optimization flow needs to be
// refactored to provide proper exit/return handle.
- if (rd_cost->rate == INT_MAX)
- rd_cost->rdcost = INT64_MAX;
+ if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX;
ctx->rate = rd_cost->rate;
ctx->dist = rd_cost->dist;
@@ -1242,8 +1171,8 @@
if (!frame_is_intra_only(cm)) {
FRAME_COUNTS *const counts = td->counts;
const int inter_block = is_inter_block(mbmi);
- const int seg_ref_active = segfeature_active(&cm->seg, mbmi->segment_id,
- SEG_LVL_REF_FRAME);
+ const int seg_ref_active =
+ segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME);
if (!seg_ref_active) {
counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
// If the segment reference feature is enabled we have only a single
@@ -1252,18 +1181,18 @@
if (inter_block) {
const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
if (cm->reference_mode == REFERENCE_MODE_SELECT)
- counts->comp_inter[vp10_get_reference_mode_context(cm, xd)]
- [has_second_ref(mbmi)]++;
+ counts->comp_inter[vp10_get_reference_mode_context(
+ cm, xd)][has_second_ref(mbmi)]++;
if (has_second_ref(mbmi)) {
- counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)]
- [ref0 == GOLDEN_FRAME]++;
+ counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+ cm, xd)][ref0 == GOLDEN_FRAME]++;
} else {
- counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0]
- [ref0 != LAST_FRAME]++;
+ counts->single_ref[vp10_get_pred_context_single_ref_p1(
+ xd)][0][ref0 != LAST_FRAME]++;
if (ref0 != LAST_FRAME)
- counts->single_ref[vp10_get_pred_context_single_ref_p2(xd)][1]
- [ref0 != GOLDEN_FRAME]++;
+ counts->single_ref[vp10_get_pred_context_single_ref_p2(
+ xd)][1][ref0 != GOLDEN_FRAME]++;
}
}
}
@@ -1301,17 +1230,15 @@
int mi_width = num_8x8_blocks_wide_lookup[bsize];
int mi_height = num_8x8_blocks_high_lookup[bsize];
for (p = 0; p < MAX_MB_PLANE; p++) {
- memcpy(
- xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
- a + num_4x4_blocks_wide * p,
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
- xd->plane[p].subsampling_x);
- memcpy(
- xd->left_context[p]
- + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
- l + num_4x4_blocks_high * p,
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
- xd->plane[p].subsampling_y);
+ memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
+ a + num_4x4_blocks_wide * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ memcpy(xd->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ l + num_4x4_blocks_high * p,
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
}
memcpy(xd->above_seg_context + mi_col, sa,
sizeof(*xd->above_seg_context) * mi_width);
@@ -1333,17 +1260,15 @@
// buffer the above/left context information of the block in search.
for (p = 0; p < MAX_MB_PLANE; ++p) {
- memcpy(
- a + num_4x4_blocks_wide * p,
- xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
- xd->plane[p].subsampling_x);
- memcpy(
- l + num_4x4_blocks_high * p,
- xd->left_context[p]
- + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
- (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
- xd->plane[p].subsampling_y);
+ memcpy(a + num_4x4_blocks_wide * p,
+ xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
+ xd->plane[p].subsampling_x);
+ memcpy(l + num_4x4_blocks_high * p,
+ xd->left_context[p] +
+ ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
+ (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
+ xd->plane[p].subsampling_y);
}
memcpy(sa, xd->above_seg_context + mi_col,
sizeof(*xd->above_seg_context) * mi_width);
@@ -1351,8 +1276,7 @@
sizeof(xd->left_seg_context[0]) * mi_height);
}
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile,
- ThreadData *td,
+static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx) {
@@ -1367,9 +1291,8 @@
}
static void encode_sb(VP10_COMP *cpi, ThreadData *td,
- const TileInfo *const tile,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- int output_enabled, BLOCK_SIZE bsize,
+ const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+ int mi_col, int output_enabled, BLOCK_SIZE bsize,
PC_TREE *pc_tree) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
@@ -1380,8 +1303,7 @@
PARTITION_TYPE partition;
BLOCK_SIZE subsize = bsize;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
if (bsize >= BLOCK_8X8) {
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
@@ -1431,9 +1353,7 @@
subsize, pc_tree->split[3]);
}
break;
- default:
- assert(0 && "Invalid partition type.");
- break;
+ default: assert(0 && "Invalid partition type."); break;
}
if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
@@ -1443,9 +1363,8 @@
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, then return the largest allowed partition size.
-static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
- int rows_left, int cols_left,
- int *bh, int *bw) {
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left,
+ int cols_left, int *bh, int *bw) {
if (rows_left <= 0 || cols_left <= 0) {
return VPXMIN(bsize, BLOCK_8X8);
} else {
@@ -1460,9 +1379,10 @@
return bsize;
}
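/* A minimal sketch of the shrink-to-fit idea in find_partition_size(),
 * using the num_8x8_blocks_{high,wide}_lookup tables seen elsewhere in
 * this file; the real routine also reports the chosen dimensions via
 * bh/bw and walks the sizes in its own order: */
static BLOCK_SIZE shrink_to_fit_sketch(BLOCK_SIZE bsize, int rows_left,
                                       int cols_left) {
  while (bsize > BLOCK_4X4 &&
         (num_8x8_blocks_high_lookup[bsize] > rows_left ||
          num_8x8_blocks_wide_lookup[bsize] > cols_left))
    bsize = (BLOCK_SIZE)(bsize - 1);  // try the next smaller BLOCK_SIZE
  return bsize;
}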
-static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
- int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
- BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
+static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
+ int bw_in, int row8x8_remaining,
+ int col8x8_remaining, BLOCK_SIZE bsize,
+ MODE_INFO **mi_8x8) {
int bh = bh_in;
int r, c;
for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
@@ -1470,8 +1390,8 @@
for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
const int index = r * mis + c;
mi_8x8[index] = mi + index;
- mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
- row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
+ mi_8x8[index]->mbmi.sb_type = find_partition_size(
+ bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
}
@@ -1508,17 +1428,14 @@
} else {
// Else this is a partial SB64.
set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
- col8x8_remaining, bsize, mi_8x8);
+ col8x8_remaining, bsize, mi_8x8);
}
}
-static void rd_use_partition(VP10_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- MODE_INFO **mi_8x8, TOKENEXTRA **tp,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- int *rate, int64_t *dist,
+static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, MODE_INFO **mi_8x8,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon, PC_TREE *pc_tree) {
VP10_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
@@ -1540,8 +1457,7 @@
int do_partition_search = 1;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
assert(num_4x4_blocks_wide_lookup[bsize] ==
num_4x4_blocks_high_lookup[bsize]);
@@ -1583,15 +1499,15 @@
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
pc_tree->partitioning = PARTITION_NONE;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
- ctx, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx,
+ INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rdc.rate < INT_MAX) {
none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
- none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
- none_rdc.dist);
+ none_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist);
}
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
@@ -1602,23 +1518,21 @@
switch (partition) {
case PARTITION_NONE:
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
- bsize, ctx, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize,
+ ctx, INT64_MAX);
break;
case PARTITION_HORZ:
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
- subsize, &pc_tree->horizontal[0],
- INT64_MAX);
- if (last_part_rdc.rate != INT_MAX &&
- bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
+ subsize, &pc_tree->horizontal[0], INT64_MAX);
+ if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+ mi_row + (mi_step >> 1) < cm->mi_rows) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
vp10_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
- subsize, &pc_tree->horizontal[1], INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
+ &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp10_rd_cost_reset(&last_part_rdc);
break;
@@ -1631,17 +1545,16 @@
case PARTITION_VERT:
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
subsize, &pc_tree->vertical[0], INT64_MAX);
- if (last_part_rdc.rate != INT_MAX &&
- bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
+ if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 &&
+ mi_col + (mi_step >> 1) < cm->mi_cols) {
RD_COST tmp_rdc;
PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
vp10_rd_cost_init(&tmp_rdc);
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
- subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
- INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
+ &tmp_rdc, subsize,
+ &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp10_rd_cost_reset(&last_part_rdc);
break;
@@ -1669,11 +1582,10 @@
continue;
vp10_rd_cost_init(&tmp_rdc);
- rd_use_partition(cpi, td, tile_data,
- mi_8x8 + jj * bss * mis + ii * bss, tp,
- mi_row + y_idx, mi_col + x_idx, subsize,
- &tmp_rdc.rate, &tmp_rdc.dist,
- i != 3, pc_tree->split[i]);
+ rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
+ tp, mi_row + y_idx, mi_col + x_idx, subsize,
+ &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
+ pc_tree->split[i]);
if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
vp10_rd_cost_reset(&last_part_rdc);
break;
@@ -1682,26 +1594,23 @@
last_part_rdc.dist += tmp_rdc.dist;
}
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rdc.rate < INT_MAX) {
last_part_rdc.rate += cpi->partition_cost[pl][partition];
- last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- last_part_rdc.rate, last_part_rdc.dist);
+ last_part_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist);
}
- if (do_partition_search
- && cpi->sf.adjust_partitioning_from_last_frame
- && cpi->sf.partition_search_type == SEARCH_PARTITION
- && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
- && (mi_row + mi_step < cm->mi_rows ||
- mi_row + (mi_step >> 1) == cm->mi_rows)
- && (mi_col + mi_step < cm->mi_cols ||
- mi_col + (mi_step >> 1) == cm->mi_cols)) {
+ if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame &&
+ cpi->sf.partition_search_type == SEARCH_PARTITION &&
+ partition != PARTITION_SPLIT && bsize > BLOCK_8X8 &&
+ (mi_row + mi_step < cm->mi_rows ||
+ mi_row + (mi_step >> 1) == cm->mi_rows) &&
+ (mi_col + mi_step < cm->mi_cols ||
+ mi_col + (mi_step >> 1) == cm->mi_cols)) {
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rdc.rate = 0;
chosen_rdc.dist = 0;
@@ -1721,9 +1630,9 @@
save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->split[i]->partitioning = PARTITION_NONE;
- rd_pick_sb_modes(cpi, tile_data, x,
- mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
- split_subsize, &pc_tree->split[i]->none, INT64_MAX);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx,
+ &tmp_rdc, split_subsize, &pc_tree->split[i]->none,
+ INT64_MAX);
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
@@ -1736,7 +1645,7 @@
chosen_rdc.dist += tmp_rdc.dist;
if (i != 3)
- encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
+ encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
split_subsize, pc_tree->split[i]);
pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
@@ -1746,22 +1655,20 @@
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rdc.rate < INT_MAX) {
chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
- chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- chosen_rdc.rate, chosen_rdc.dist);
+ chosen_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist);
}
}
// If last_part is better, set the partitioning to that.
if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
mi_8x8[0]->mbmi.sb_type = bsize;
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = partition;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition;
chosen_rdc = last_part_rdc;
}
// If none was better, set the partitioning to that.
if (none_rdc.rdcost < chosen_rdc.rdcost) {
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = PARTITION_NONE;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
chosen_rdc = none_rdc;
}
@@ -1783,22 +1690,17 @@
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
- BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
- BLOCK_16X16
+ BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
+ BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16,
+ BLOCK_16X16, BLOCK_16X16, BLOCK_16X16
};
static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
- BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
- BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
- BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
- BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
- BLOCK_64X64
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+ BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
+ BLOCK_64X64, BLOCK_64X64, BLOCK_64X64
};
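/* Example reading of the two tables above (both indexed by BLOCK_SIZE,
 * assuming the usual BLOCK_4X4 .. BLOCK_64X64 ordering): for a
 * neighborhood whose observed size is BLOCK_32X32 (index 9), the search
 * range becomes min_partition_size[9] = BLOCK_16X16 up to
 * max_partition_size[9] = BLOCK_64X64. */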
-
// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
@@ -1811,14 +1713,14 @@
BLOCK_SIZE *max_block_size,
int bs_hist[BLOCK_SIZES]) {
int sb_width_in_blocks = MI_BLOCK_SIZE;
- int sb_height_in_blocks = MI_BLOCK_SIZE;
+ int sb_height_in_blocks = MI_BLOCK_SIZE;
int i, j;
int index = 0;
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- MODE_INFO *mi = mi_8x8[index+j];
+ MODE_INFO *mi = mi_8x8[index + j];
BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
bs_hist[sb_type]++;
*min_block_size = VPXMIN(*min_block_size, sb_type);
@@ -1830,19 +1732,16 @@
// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
- BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
- BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
- BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
- BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
- BLOCK_64X64
+ BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8,
+ BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32,
+ BLOCK_32X32, BLOCK_32X32, BLOCK_64X64
};
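/* Usage example for the table above, assuming the usual BLOCK_SIZE
 * ordering: next_square_size[BLOCK_16X8] == BLOCK_8X8 (the largest square
 * not exceeding a 16x8 block), while square inputs map to themselves,
 * e.g. next_square_size[BLOCK_32X32] == BLOCK_32X32. */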
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
- MACROBLOCKD *const xd,
- int mi_row, int mi_col,
- BLOCK_SIZE *min_block_size,
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP10_COMMON *const cm = &cpi->common;
MODE_INFO **mi = xd->mi;
@@ -1853,7 +1752,7 @@
int bh, bw;
BLOCK_SIZE min_size = BLOCK_4X4;
BLOCK_SIZE max_size = BLOCK_64X64;
- int bs_hist[BLOCK_SIZES] = {0};
+ int bs_hist[BLOCK_SIZES] = { 0 };
// Trap case where we do not have a prediction.
if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
@@ -1890,8 +1789,7 @@
}
// Check border cases where max and min from neighbors may not be legal.
- max_size = find_partition_size(max_size,
- row8x8_remaining, col8x8_remaining,
+ max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
&bh, &bw);
// Test for blocks at the edge of the active image.
// This may be the actual edge of the image or where there are formatting
@@ -1908,7 +1806,7 @@
// *min_block_size.
if (cpi->sf.use_square_partition_only &&
next_square_size[max_size] < min_size) {
- min_size = next_square_size[max_size];
+ min_size = next_square_size[max_size];
}
*min_block_size = min_size;
@@ -1916,10 +1814,10 @@
}
// TODO(jingning): refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
+static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+ int mi_col, BLOCK_SIZE bsize,
BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
- int mi_width = num_8x8_blocks_wide_lookup[bsize];
+ int mi_width = num_8x8_blocks_wide_lookup[bsize];
int mi_height = num_8x8_blocks_high_lookup[bsize];
int idx, idy;
@@ -1978,16 +1876,19 @@
}
#if CONFIG_FP_MB_STATS
-const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
-const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
-const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
- {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
-const int qindex_split_threshold_lookup[BLOCK_SIZES] =
- {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
-const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
- {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
+const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 4, 4 };
+const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1,
+ 2, 1, 2, 4, 2, 4 };
+const int qindex_skip_threshold_lookup[BLOCK_SIZES] = {
+ 0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120
+};
+const int qindex_split_threshold_lookup[BLOCK_SIZES] = {
+ 0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120
+};
+const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6
+};
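/* All five lookup tables above have one entry per BLOCK_SIZE (13 values,
 * BLOCK_4X4 through BLOCK_64X64), mirroring the other per-block-size
 * tables in this file. */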
typedef enum {
MV_ZERO = 0,
@@ -2026,10 +1927,10 @@
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
- TileDataEnc *tile_data,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- BLOCK_SIZE bsize, RD_COST *rd_cost,
- int64_t best_rd, PC_TREE *pc_tree) {
+ TileDataEnc *tile_data, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ RD_COST *rd_cost, int64_t best_rd,
+ PC_TREE *pc_tree) {
VP10_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
MACROBLOCK *const x = &td->mb;
@@ -2060,14 +1961,14 @@
#endif
int partition_none_allowed = !force_horz_split && !force_vert_split;
- int partition_horz_allowed = !force_vert_split && yss <= xss &&
- bsize >= BLOCK_8X8;
- int partition_vert_allowed = !force_horz_split && xss <= yss &&
- bsize >= BLOCK_8X8;
- (void) *tp_orig;
+ int partition_horz_allowed =
+ !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+ int partition_vert_allowed =
+ !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+ (void)*tp_orig;
assert(num_8x8_blocks_wide_lookup[bsize] ==
- num_8x8_blocks_high_lookup[bsize]);
+ num_8x8_blocks_high_lookup[bsize]);
vp10_rd_cost_init(&this_rdc);
vp10_rd_cost_init(&sum_rdc);
@@ -2080,8 +1981,10 @@
x->mb_energy = vp10_block_energy(cpi, x, bsize);
if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
- int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
- + get_chessboard_index(cm->current_video_frame)) & 0x1;
+ int cb_partition_search_ctrl =
+ ((pc_tree->index == 0 || pc_tree->index == 3) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1;
if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
@@ -2091,10 +1994,10 @@
// The thresholds set here have to be square block sizes.
if (cpi->sf.auto_min_max_partition_size) {
partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
- partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
- force_horz_split);
- partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
- force_vert_split);
+ partition_horz_allowed &=
+ ((bsize <= max_size && bsize > min_size) || force_horz_split);
+ partition_vert_allowed &=
+ ((bsize <= max_size && bsize > min_size) || force_vert_split);
do_split &= bsize > min_size;
}
if (cpi->sf.use_square_partition_only) {
@@ -2107,8 +2010,8 @@
#if CONFIG_FP_MB_STATS
if (cpi->use_fp_mb_stats) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
- src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
- mi_row, mi_col, bsize);
+ src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row,
+ mi_col, bsize);
}
#endif
@@ -2128,7 +2031,7 @@
// compute a complexity measure, basically measuring the inconsistency of
// motion vectors obtained from the first pass in the current block
- for (r = mb_row; r < mb_row_end ; r++) {
+ for (r = mb_row; r < mb_row_end; r++) {
for (c = mb_col; c < mb_col_end; c++) {
const int mb_index = r * cm->mb_cols + c;
@@ -2165,14 +2068,14 @@
// PARTITION_NONE
if (partition_none_allowed) {
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
- &this_rdc, bsize, ctx, best_rdc.rdcost);
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
+ best_rdc.rdcost);
if (this_rdc.rate != INT_MAX) {
if (bsize >= BLOCK_8X8) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
- this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- this_rdc.rate, this_rdc.dist);
+ this_rdc.rdcost =
+ RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
}
if (this_rdc.rdcost < best_rdc.rdcost) {
@@ -2180,12 +2083,11 @@
int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
best_rdc = this_rdc;
- if (bsize >= BLOCK_8X8)
- pc_tree->partitioning = PARTITION_NONE;
+ if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
// Adjust dist breakout threshold according to the partition size.
- dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
+ dist_breakout_thr >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
rate_breakout_thr *= num_pels_log2_lookup[bsize];
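/* Worked example, assuming b_{width,height}_log2_lookup count 4x4 units
 * as in the rest of the codebase: BLOCK_64X64 shifts by 8 - (4 + 4) = 0
 * (full threshold), while BLOCK_8X8 shifts by 8 - (1 + 1) = 6, so the
 * distortion breakout threshold shrinks with block area. */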
@@ -2196,7 +2098,7 @@
// early termination at that speed.
if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] &&
(ctx->skippable && best_rdc.dist < dist_breakout_thr &&
- best_rdc.rate < rate_breakout_thr)) {
+ best_rdc.rate < rate_breakout_thr)) {
do_split = 0;
do_rect = 0;
}
@@ -2252,8 +2154,7 @@
}
// store estimated motion vector
- if (cpi->sf.adaptive_motion_search)
- store_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx);
// PARTITION_SPLIT
// TODO(jingning): use the motion vectors given by the above search as
@@ -2267,23 +2168,20 @@
ctx->mic.mbmi.interp_filter;
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
pc_tree->leaf_split[0], best_rdc.rdcost);
- if (sum_rdc.rate == INT_MAX)
- sum_rdc.rdcost = INT64_MAX;
+ if (sum_rdc.rate == INT_MAX) sum_rdc.rdcost = INT64_MAX;
} else {
for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
- const int x_idx = (i & 1) * mi_step;
- const int y_idx = (i >> 1) * mi_step;
+ const int x_idx = (i & 1) * mi_step;
+ const int y_idx = (i >> 1) * mi_step;
if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
continue;
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
pc_tree->split[i]->index = i;
- rd_pick_partition(cpi, td, tile_data, tp,
- mi_row + y_idx, mi_col + x_idx,
- subsize, &this_rdc,
+ rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &this_rdc,
best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);
if (this_rdc.rate == INT_MAX) {
@@ -2300,8 +2198,7 @@
if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
@@ -2310,8 +2207,7 @@
} else {
// skip rectangular partition test when larger block size
// gives better rd cost
- if (cpi->sf.less_rectangular_check)
- do_rect &= !partition_none_allowed;
+ if (cpi->sf.less_rectangular_check) do_rect &= !partition_none_allowed;
}
restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
}
@@ -2319,13 +2215,11 @@
// PARTITION_HORZ
if (partition_horz_allowed &&
(do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
- subsize = get_subsize(bsize, PARTITION_HORZ);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ subsize = get_subsize(bsize, PARTITION_HORZ);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
- pc_tree->horizontal[0].pred_interp_filter =
- ctx->mic.mbmi.interp_filter;
+ pc_tree->horizontal[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->horizontal[0], best_rdc.rdcost);
@@ -2335,14 +2229,12 @@
update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
- pc_tree->horizontal[1].pred_interp_filter =
- ctx->mic.mbmi.interp_filter;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
- &this_rdc, subsize, &pc_tree->horizontal[1],
+ pc_tree->horizontal[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc,
+ subsize, &pc_tree->horizontal[1],
best_rdc.rdcost - sum_rdc.rdcost);
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
@@ -2367,14 +2259,12 @@
// PARTITION_VERT
if (partition_vert_allowed &&
(do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
- subsize = get_subsize(bsize, PARTITION_VERT);
+ subsize = get_subsize(bsize, PARTITION_VERT);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
- pc_tree->vertical[0].pred_interp_filter =
- ctx->mic.mbmi.interp_filter;
+ pc_tree->vertical[0].pred_interp_filter = ctx->mic.mbmi.interp_filter;
rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->vertical[0], best_rdc.rdcost);
if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
@@ -2383,15 +2273,13 @@
encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
&pc_tree->vertical[0]);
- if (cpi->sf.adaptive_motion_search)
- load_pred_mv(x, ctx);
+ if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
partition_none_allowed)
- pc_tree->vertical[1].pred_interp_filter =
- ctx->mic.mbmi.interp_filter;
- rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
- &this_rdc, subsize,
- &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
+ pc_tree->vertical[1].pred_interp_filter = ctx->mic.mbmi.interp_filter;
+ rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc,
+ subsize, &pc_tree->vertical[1],
+ best_rdc.rdcost - sum_rdc.rdcost);
if (this_rdc.rate == INT_MAX) {
sum_rdc.rdcost = INT64_MAX;
} else {
@@ -2404,8 +2292,7 @@
if (sum_rdc.rdcost < best_rdc.rdcost) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
- sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
- sum_rdc.rate, sum_rdc.dist);
+ sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
if (sum_rdc.rdcost < best_rdc.rdcost) {
best_rdc = sum_rdc;
pc_tree->partitioning = PARTITION_VERT;
@@ -2418,15 +2305,14 @@
// warning related to the fact that best_rd isn't used after this
// point. This code should be refactored so that the duplicate
// checks occur in some sub function and thus are used...
- (void) best_rd;
+ (void)best_rd;
*rd_cost = best_rdc;
-
if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
pc_tree->index != 3) {
int output_enabled = (bsize == BLOCK_64X64);
- encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
- bsize, pc_tree);
+ encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
+ pc_tree);
}
if (bsize == BLOCK_64X64) {
@@ -2438,10 +2324,8 @@
}
}
-static void encode_rd_sb_row(VP10_COMP *cpi,
- ThreadData *td,
- TileDataEnc *tile_data,
- int mi_row,
+static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+ TileDataEnc *tile_data, int mi_row,
TOKENEXTRA **tp) {
VP10_COMMON *const cm = &cpi->common;
TileInfo *const tile_info = &tile_data->tile_info;
@@ -2468,8 +2352,7 @@
MODE_INFO **mi = cm->mi_grid_visible + idx_str;
if (sf->adaptive_pred_interp_filter) {
- for (i = 0; i < 64; ++i)
- td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
+ for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE;
for (i = 0; i < 64; ++i) {
td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
@@ -2483,8 +2366,8 @@
td->pc_root->index = 0;
if (seg->enabled) {
- const uint8_t *const map = seg->update_map ? cpi->segmentation_map
- : cm->last_frame_seg_map;
+ const uint8_t *const map =
+ seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
}
@@ -2495,27 +2378,26 @@
seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else if (cpi->partition_search_skippable_frame) {
BLOCK_SIZE bsize;
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else if (sf->partition_search_type == VAR_BASED_PARTITION &&
cm->frame_type != KEY_FRAME) {
choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
- rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
- BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
+ rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64,
+ &dummy_rate, &dummy_dist, 1, td->pc_root);
} else {
// If required set upper and lower partition size limits
if (sf->auto_min_max_partition_size) {
set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
- &x->min_partition_size,
- &x->max_partition_size);
+ &x->min_partition_size, &x->max_partition_size);
}
rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rdc, INT64_MAX, td->pc_root);
@@ -2537,8 +2419,7 @@
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
memset(xd->above_context[0], 0,
- sizeof(*xd->above_context[0]) *
- 2 * aligned_mi_cols * MAX_MB_PLANE);
+ sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE);
memset(xd->above_seg_context, 0,
sizeof(*xd->above_seg_context) * aligned_mi_cols);
}
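/* Hypothetical layout check for the assumption stated above (the
 * allocation itself lives elsewhere): if the three plane contexts share
 * one buffer with a per-plane stride of 2 * aligned_mi_cols entries, then
 *   assert(xd->above_context[1] ==
 *          xd->above_context[0] + 2 * aligned_mi_cols);
 * would hold, which is what licenses the single memset. */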
@@ -2549,8 +2430,8 @@
if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
return 0;
} else {
- return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG)
- + !!(ref_flags & VPX_ALT_FLAG)) >= 2;
+ return (!!(ref_flags & VPX_GOLD_FLAG) + !!(ref_flags & VPX_LAST_FLAG) +
+ !!(ref_flags & VPX_ALT_FLAG)) >= 2;
}
}
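/* The !! idiom above normalizes each flag test to 0 or 1, so the sum
 * counts the enabled reference frames; e.g. with
 * ref_flags = VPX_LAST_FLAG | VPX_ALT_FLAG the sum is 0 + 1 + 1 = 2 and
 * dual-reference checking is allowed. */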
@@ -2579,11 +2460,10 @@
}
static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
- if (xd->lossless[0])
- return ONLY_4X4;
+ if (xd->lossless[0]) return ONLY_4X4;
if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
return ALLOW_32X32;
- else if (cpi->sf.tx_size_search_method == USE_FULL_RD||
+ else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
cpi->sf.tx_size_search_method == USE_TX_8X8)
return TX_MODE_SELECT;
else
@@ -2599,10 +2479,9 @@
int tile_tok = 0;
if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) {
- if (cpi->tile_data != NULL)
- vpx_free(cpi->tile_data);
- CHECK_MEM_ERROR(cm, cpi->tile_data,
- vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
+ if (cpi->tile_data != NULL) vpx_free(cpi->tile_data);
+ CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows *
+ sizeof(*cpi->tile_data)));
cpi->allocated_tiles = tile_cols * tile_rows;
for (tile_row = 0; tile_row < tile_rows; ++tile_row)
@@ -2632,13 +2511,12 @@
}
}
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td,
- int tile_row, int tile_col) {
+void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
+ int tile_col) {
VP10_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
- TileDataEnc *this_tile =
- &cpi->tile_data[tile_row * tile_cols + tile_col];
- const TileInfo * const tile_info = &this_tile->tile_info;
+ TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
+ const TileInfo *const tile_info = &this_tile->tile_info;
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
int mi_row;
@@ -2653,7 +2531,7 @@
cpi->tok_count[tile_row][tile_col] =
(unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
assert(tok - cpi->tile_tok[tile_row][tile_col] <=
- allocated_tokens(*tile_info));
+ allocated_tokens(*tile_info));
}
static void encode_tiles(VP10_COMP *cpi) {
@@ -2673,10 +2551,9 @@
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
- cm->current_video_frame * cm->MBs * sizeof(uint8_t);
+ cm->current_video_frame * cm->MBs * sizeof(uint8_t);
- if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
- return EOF;
+ if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF;
*this_frame_mb_stats = mb_stats_in;
@@ -2703,17 +2580,14 @@
rdc->ex_search_count = 0; // Exhaustive mesh search hits.
for (i = 0; i < MAX_SEGMENTS; ++i) {
- const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled ?
- vp10_get_qindex(&cm->seg, i, cm->base_qindex) :
- cm->base_qindex;
- xd->lossless[i] = qindex == 0 &&
- cm->y_dc_delta_q == 0 &&
- cm->uv_dc_delta_q == 0 &&
- cm->uv_ac_delta_q == 0;
+ const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
+ ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ : cm->base_qindex;
+ xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
+ cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
}
- if (!cm->seg.enabled && xd->lossless[0])
- x->optimize = 0;
+ if (!cm->seg.enabled && xd->lossless[0]) x->optimize = 0;
cm->tx_mode = select_tx_mode(cpi, xd);
@@ -2722,15 +2596,13 @@
vp10_initialize_rd_consts(cpi);
vp10_initialize_me_consts(cpi, x, cm->base_qindex);
init_encode_frame_mb_context(cpi);
- cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
- cm->width == cm->last_width &&
- cm->height == cm->last_height &&
- !cm->intra_only &&
- cm->last_show_frame;
+ cm->use_prev_frame_mvs =
+ !cm->error_resilient_mode && cm->width == cm->last_width &&
+ cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame;
// Special case: set prev_mi to NULL when the previous mode info
// context cannot be used.
- cm->prev_mi = cm->use_prev_frame_mvs ?
- cm->prev_mip + cm->mi_stride + 1 : NULL;
+ cm->prev_mi =
+ cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
x->quant_fp = cpi->sf.use_quant_fp;
vp10_zero(x->skip_txfm);
@@ -2740,10 +2612,10 @@
vpx_usec_timer_start(&emr_timer);
#if CONFIG_FP_MB_STATS
- if (cpi->use_fp_mb_stats) {
- input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
- &cpi->twopass.this_frame_mb_stats);
- }
+ if (cpi->use_fp_mb_stats) {
+ input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
+ &cpi->twopass.this_frame_mb_stats);
+ }
#endif
// If allowed, encode tiles in parallel, with one thread handling one tile.
@@ -2764,8 +2636,7 @@
static INTERP_FILTER get_interp_filter(
const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
- if (!is_alt_ref &&
- threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
+ if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
return EIGHTTAP_SMOOTH;
@@ -2790,9 +2661,9 @@
// the other two.
if (!frame_is_intra_only(cm)) {
if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
- cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
+ cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
(cm->ref_frame_sign_bias[ALTREF_FRAME] ==
- cm->ref_frame_sign_bias[LAST_FRAME])) {
+ cm->ref_frame_sign_bias[LAST_FRAME])) {
cpi->allow_comp_inter_inter = 0;
} else {
cpi->allow_comp_inter_inter = 1;
@@ -2826,10 +2697,8 @@
if (is_alt_ref || !cpi->allow_comp_inter_inter)
cm->reference_mode = SINGLE_REFERENCE;
else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
- mode_thrs[COMPOUND_REFERENCE] >
- mode_thrs[REFERENCE_MODE_SELECT] &&
- check_dual_ref_flags(cpi) &&
- cpi->static_mb_pct == 100)
+ mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] &&
+ check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
cm->reference_mode = COMPOUND_REFERENCE;
else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
cm->reference_mode = SINGLE_REFERENCE;
@@ -2941,18 +2810,17 @@
++counts->uv_mode[y_mode][uv_mode];
}
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
- TOKENEXTRA **t, int output_enabled,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx) {
+static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+ int output_enabled, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO **mi_8x8 = xd->mi;
MODE_INFO *mi = mi_8x8[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
- const int seg_skip = segfeature_active(&cm->seg, mbmi->segment_id,
- SEG_LVL_SKIP);
+ const int seg_skip =
+ segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
const int mis = cm->mi_stride;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
@@ -2962,8 +2830,7 @@
cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
cpi->sf.allow_skip_recode;
- if (!x->skip_recode)
- memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
+ if (!x->skip_recode) memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
x->skip_optimize = ctx->is_coded;
ctx->is_coded = 1;
@@ -2983,11 +2850,10 @@
const int is_compound = has_second_ref(mbmi);
set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
for (ref = 0; ref < 1 + is_compound; ++ref) {
- YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
- mbmi->ref_frame[ref]);
+ YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
assert(cfg != NULL);
vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
- &xd->block_refs[ref]->sf);
+ &xd->block_refs[ref]->sf);
}
if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
@@ -3001,8 +2867,7 @@
}
if (output_enabled) {
- if (cm->tx_mode == TX_MODE_SELECT &&
- mbmi->sb_type >= BLOCK_8X8 &&
+ if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 &&
!(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd),
&td->counts->tx)[mbmi->tx_size];
@@ -3024,15 +2889,14 @@
}
++td->counts->tx.tx_totals[mbmi->tx_size];
++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
- if (mbmi->tx_size < TX_32X32 &&
- cm->base_qindex > 0 && !mbmi->skip &&
+ if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
if (is_inter_block(mbmi)) {
++td->counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
- ++td->counts->intra_ext_tx[mbmi->tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]]
- [mbmi->tx_type];
+ ++td->counts
+ ->intra_ext_tx[mbmi->tx_size][intra_mode_to_tx_type_context
+ [mbmi->mode]][mbmi->tx_type];
}
}
}
diff --git a/vp10/encoder/encodeframe.h b/vp10/encoder/encodeframe.h
index fbb81f8..f24a572 100644
--- a/vp10/encoder/encodeframe.h
+++ b/vp10/encoder/encodeframe.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_ENCODEFRAME_H_
#define VP10_ENCODER_ENCODEFRAME_H_
@@ -31,14 +30,14 @@
#define VAR_HIST_SMALL_CUT_OFF 45
void vp10_setup_src_planes(struct macroblock *x,
- const struct yv12_buffer_config *src,
- int mi_row, int mi_col);
+ const struct yv12_buffer_config *src, int mi_row,
+ int mi_col);
void vp10_encode_frame(struct VP10_COMP *cpi);
void vp10_init_tile_data(struct VP10_COMP *cpi);
void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
- int tile_row, int tile_col);
+ int tile_row, int tile_col);
void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
diff --git a/vp10/encoder/encodemb.c b/vp10/encoder/encodemb.c
index 0a23d35..b2fbf13 100644
--- a/vp10/encoder/encodemb.c
+++ b/vp10/encoder/encodemb.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "./vp10_rtcd.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
@@ -53,32 +52,30 @@
#define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF)
typedef struct vp10_token_state {
- int rate;
- int error;
- int next;
- int16_t token;
- short qc;
+ int rate;
+ int error;
+ int next;
+ int16_t token;
+ short qc;
} vp10_token_state;
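/* Each vp10_token_state is one trellis node in optimize_b() below: the
 * accumulated rate and squared error along the surviving path, the index
 * of the next node on that path, the token that would be coded, and the
 * candidate quantized level qc. Two such states are kept per coefficient
 * position (the original rounding and a magnitude-reduced alternative). */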
// TODO(jimbankoski): experiment to find optimal RD numbers.
static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };
-#define UPDATE_RD_COST()\
-{\
- rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
- rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
- if (rd_cost0 == rd_cost1) {\
- rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
- rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
- }\
-}
+#define UPDATE_RD_COST() \
+ { \
+ rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
+ rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
+ if (rd_cost0 == rd_cost1) { \
+ rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); \
+ rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); \
+ } \
+ }
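/* Sketch of the tie-break above, assuming RDCOST() (defined in rd.h)
 * scales its rate term with a >> 8: RDTRUNC() keeps exactly the low
 * 8 bits that the shift discards, so two branches with equal rounded
 * costs are ordered by their truncation residue rather than arbitrarily. */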
// This function is a placeholder for now but may ultimately need
// to scan previous tokens to work out the correct context.
-static int trellis_get_coeff_context(const int16_t *scan,
- const int16_t *nb,
- int idx, int token,
- uint8_t *token_cache) {
+static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
+ int idx, int token, uint8_t *token_cache) {
int bak = token_cache[scan[idx]], pt;
token_cache[scan[idx]] = vp10_pt_energy_class[token];
pt = get_coef_context(nb, token_cache, idx + 1);
@@ -86,8 +83,8 @@
return pt;
}
-static int optimize_b(MACROBLOCK *mb, int plane, int block,
- TX_SIZE tx_size, int ctx) {
+static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
+ int ctx) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblock_plane *const p = &mb->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -125,8 +122,7 @@
assert(eob <= default_eob);
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
- if (!ref)
- rdmult = (rdmult * 9) >> 4;
+ if (!ref) rdmult = (rdmult * 9) >> 4;
/* Initialize the sentinel node of the trellis. */
tokens[eob][0].rate = 0;
@@ -157,10 +153,12 @@
if (next < default_eob) {
band = band_translate[i + 1];
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
- rate0 += mb->token_costs[tx_size][type][ref][band][0][pt]
- [tokens[next][0].token];
- rate1 += mb->token_costs[tx_size][type][ref][band][0][pt]
- [tokens[next][1].token];
+ rate0 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][0]
+ .token];
+ rate1 +=
+ mb->token_costs[tx_size][type][ref][band][0][pt][tokens[next][1]
+ .token];
}
UPDATE_RD_COST();
/* And pick the best. */
@@ -185,8 +183,8 @@
rate1 = tokens[next][1].rate;
if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) &&
- (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul +
- dequant_ptr[rc != 0]))
+ (abs(x) * dequant_ptr[rc != 0] <
+ abs(coeff[rc]) * mul + dequant_ptr[rc != 0]))
shortcut = 1;
else
shortcut = 0;
@@ -212,13 +210,15 @@
band = band_translate[i + 1];
if (t0 != EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache);
- rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt]
- [tokens[next][0].token];
+ rate0 +=
+ mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][0]
+ .token];
}
if (t1 != EOB_TOKEN) {
pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache);
- rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt]
- [tokens[next][1].token];
+ rate1 +=
+ mb->token_costs[tx_size][type][ref][band][!x][pt][tokens[next][1]
+ .token];
}
}
@@ -304,9 +304,8 @@
return final_eob;
}
-static INLINE void fdct32x32(int rd_transform,
- const int16_t *src, tran_low_t *dst,
- int src_stride) {
+static INLINE void fdct32x32(int rd_transform, const int16_t *src,
+ tran_low_t *dst, int src_stride) {
if (rd_transform)
vpx_fdct32x32_rd(src, dst, src_stride);
else
@@ -329,17 +328,11 @@
vp10_fwht4x4(src_diff, coeff, diff_stride);
} else {
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct4x4(src_diff, coeff, diff_stride);
- break;
+ case DCT_DCT: vpx_fdct4x4(src_diff, coeff, diff_stride); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST:
- vp10_fht4x4(src_diff, coeff, diff_stride, tx_type);
- break;
- default:
- assert(0);
- break;
+ case ADST_ADST: vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); break;
+ default: assert(0); break;
}
}
}
@@ -350,12 +343,8 @@
case DCT_DCT:
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST:
- vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
- break;
- default:
- assert(0);
- break;
+ case ADST_ADST: vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); break;
+ default: assert(0); break;
}
}
@@ -365,12 +354,8 @@
case DCT_DCT:
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST:
- vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
- break;
- default:
- assert(0);
- break;
+ case ADST_ADST: vp10_fht16x16(src_diff, coeff, diff_stride, tx_type); break;
+ default: assert(0); break;
}
}
@@ -378,17 +363,11 @@
tran_low_t *coeff, int diff_stride,
TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT:
- fdct32x32(rd_transform, src_diff, coeff, diff_stride);
- break;
+ case DCT_DCT: fdct32x32(rd_transform, src_diff, coeff, diff_stride); break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST:
- assert(0);
- break;
- default:
- assert(0);
- break;
+ case ADST_ADST: assert(0); break;
+ default: assert(0); break;
}
}
@@ -400,52 +379,40 @@
vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
} else {
switch (tx_type) {
- case DCT_DCT:
- vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
- break;
+ case DCT_DCT: vpx_highbd_fdct4x4(src_diff, coeff, diff_stride); break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
}
static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
- int diff_stride, TX_TYPE tx_type) {
+ int diff_stride, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT:
- vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
- break;
+ case DCT_DCT: vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
vp10_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
- int diff_stride, TX_TYPE tx_type) {
+ int diff_stride, TX_TYPE tx_type) {
switch (tx_type) {
- case DCT_DCT:
- vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
- break;
+ case DCT_DCT: vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); break;
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
vp10_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
@@ -458,19 +425,14 @@
break;
case ADST_DCT:
case DCT_ADST:
- case ADST_ADST:
- assert(0);
- break;
- default:
- assert(0);
- break;
+ case ADST_ADST: assert(0); break;
+ default: assert(0); break;
}
}
#endif // CONFIG_VPX_HIGHBITDEPTH
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -491,24 +453,23 @@
case TX_32X32:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
- p->round_fp, p->quant_fp, p->quant_shift,
- qcoeff, dqcoeff, pd->dequant,
- eob, scan_order->scan,
- scan_order->iscan);
+ p->round_fp, p->quant_fp, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
break;
case TX_16X16:
vpx_highbd_fdct16x16(src_diff, coeff, diff_stride);
vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_8X8:
vpx_highbd_fdct8x8(src_diff, coeff, diff_stride);
vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -517,12 +478,11 @@
vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
}
vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -532,23 +492,21 @@
case TX_32X32:
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_16X16:
vpx_fdct16x16(src_diff, coeff, diff_stride);
vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
- vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64,
- x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
+ p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
+ qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -557,19 +515,15 @@
vpx_fdct4x4(src_diff, coeff, diff_stride);
}
vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
- p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
+ pd->dequant, eob, scan_order->scan, scan_order->iscan);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -593,14 +547,14 @@
case TX_16X16:
vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride);
vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
case TX_8X8:
vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride);
vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -609,11 +563,10 @@
vpx_highbd_fdct4x4(src_diff, coeff, diff_stride);
}
vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0],
+ eob);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -622,21 +575,18 @@
switch (tx_size) {
case TX_32X32:
vpx_fdct32x32_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc_32x32(coeff, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_16X16:
vpx_fdct16x16_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc(coeff, 256, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 256, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_8X8:
vpx_fdct8x8_1(src_diff, coeff, diff_stride);
- vpx_quantize_dc(coeff, 64, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 64, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
case TX_4X4:
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -644,21 +594,15 @@
} else {
vpx_fdct4x4(src_diff, coeff, diff_stride);
}
- vpx_quantize_dc(coeff, 16, x->skip_block, p->round,
- p->quant_fp[0], qcoeff, dqcoeff,
- pd->dequant[0], eob);
+ vpx_quantize_dc(coeff, 16, x->skip_block, p->round, p->quant_fp[0],
+ qcoeff, dqcoeff, pd->dequant[0], eob);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
-
-
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -675,39 +619,38 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- switch (tx_size) {
+ switch (tx_size) {
case TX_32X32:
highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride,
- tx_type);
+ tx_type);
vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
- dqcoeff, pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ dqcoeff, pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_16X16:
highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_8X8:
highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
case TX_4X4:
vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[xd->mi[0]->mbmi.segment_id]);
vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
break;
- default:
- assert(0);
+ default: assert(0);
}
return;
}
@@ -723,35 +666,29 @@
break;
case TX_16X16:
fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
- vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
- vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[xd->mi[0]->mbmi.segment_id]);
- vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
+ vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
static void encode_block(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct encode_b_args *const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -783,20 +720,20 @@
*a = *l = 0;
return;
} else {
- vp10_xform_quant_fp(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
+ vp10_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size);
}
} else {
if (max_txsize_lookup[plane_bsize] == tx_size) {
int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1));
if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) {
// full forward transform and quantization
- vp10_xform_quant(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
+ vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size);
} else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) {
// fast path forward transform and quantization
- vp10_xform_quant_dc(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
+ vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size);
} else {
// skip forward transform
p->eobs[block] = 0;
@@ -804,8 +741,8 @@
return;
}
} else {
- vp10_xform_quant(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
+ vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size);
}
}
}
@@ -817,11 +754,9 @@
*a = *l = p->eobs[block] > 0;
}
- if (p->eobs[block])
- *(args->skip) = 0;
+ if (p->eobs[block]) *(args->skip) = 0;
- if (p->eobs[block] == 0)
- return;
+ if (p->eobs[block] == 0) return;
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
@@ -845,9 +780,7 @@
p->eobs[block], xd->bd, tx_type,
xd->lossless[xd->mi[0]->mbmi.segment_id]);
break;
- default:
- assert(0 && "Invalid transform size");
- break;
+ default: assert(0 && "Invalid transform size"); break;
}
return;
@@ -874,15 +807,13 @@
vp10_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
tx_type, xd->lossless[xd->mi[0]->mbmi.segment_id]);
break;
- default:
- assert(0 && "Invalid transform size");
- break;
+ default: assert(0 && "Invalid transform size"); break;
}
}
static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
MACROBLOCK *const x = (MACROBLOCK *)arg;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = &x->plane[plane];
@@ -897,11 +828,11 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
if (xd->lossless[0]) {
- vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
} else {
- vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride,
- p->eobs[block], xd->bd);
+ vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+ xd->bd);
}
return;
}
@@ -917,41 +848,39 @@
void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
vp10_subtract_plane(x, bsize, 0);
vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
- encode_block_pass1, x);
+ encode_block_pass1, x);
}
void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct encode_b_args arg = {x, &ctx, &mbmi->skip};
+ struct encode_b_args arg = { x, &ctx, &mbmi->skip };
int plane;
mbmi->skip = 1;
- if (x->skip)
- return;
+ if (x->skip) return;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- if (!x->skip_recode)
- vp10_subtract_plane(x, bsize, plane);
+ if (!x->skip_recode) vp10_subtract_plane(x, bsize, plane);
if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
- const struct macroblockd_plane* const pd = &xd->plane[plane];
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
- vp10_get_entropy_contexts(bsize, tx_size, pd,
- ctx.ta[plane], ctx.tl[plane]);
+ vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
+ ctx.tl[plane]);
}
vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
- &arg);
+ &arg);
}
}
void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct encode_b_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg) {
+ struct encode_b_args *const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -977,16 +906,16 @@
src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
- vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride,
- dst, dst_stride, blk_col, blk_row, plane);
+ vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+ dst_stride, blk_col, blk_row, plane);
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
switch (tx_size) {
case TX_32X32:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(32, 32, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff,
diff_stride, tx_type);
vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
@@ -1000,13 +929,13 @@
break;
case TX_16X16:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(16, 16, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (*eob)
vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
@@ -1014,13 +943,13 @@
break;
case TX_8X8:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(8, 8, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (*eob)
vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
@@ -1028,14 +957,14 @@
break;
case TX_4X4:
if (!x->skip_recode) {
- vpx_highbd_subtract_block(4, 4, src_diff, diff_stride,
- src, src_stride, dst, dst_stride, xd->bd);
+ vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
+ src_stride, dst, dst_stride, xd->bd);
vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[mbmi->segment_id]);
vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob,
- scan_order->scan, scan_order->iscan);
+ pd->dequant, eob, scan_order->scan,
+ scan_order->iscan);
}
if (*eob)
@@ -1045,12 +974,9 @@
vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
tx_type, xd->lossless[mbmi->segment_id]);
break;
- default:
- assert(0);
- return;
+ default: assert(0); return;
}
- if (*eob)
- *(args->skip) = 0;
+ if (*eob) *(args->skip) = 0;
return;
}
#endif // CONFIG_VPX_HIGHBITDEPTH
@@ -1058,8 +984,8 @@
switch (tx_size) {
case TX_32X32:
if (!x->skip_recode) {
- vpx_subtract_block(32, 32, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(32, 32, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride,
tx_type);
vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
@@ -1072,40 +998,36 @@
break;
case TX_16X16:
if (!x->skip_recode) {
- vpx_subtract_block(16, 16, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(16, 16, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type);
- vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
- p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant,
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
if (*eob)
vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
break;
case TX_8X8:
if (!x->skip_recode) {
- vpx_subtract_block(8, 8, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(8, 8, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type);
vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
- p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
- if (*eob)
- vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
+ if (*eob) vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
break;
case TX_4X4:
if (!x->skip_recode) {
- vpx_subtract_block(4, 4, src_diff, diff_stride,
- src, src_stride, dst, dst_stride);
+ vpx_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
+ dst_stride);
vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
xd->lossless[mbmi->segment_id]);
vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
- p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, eob, scan_order->scan,
- scan_order->iscan);
+ p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
+ scan_order->scan, scan_order->iscan);
}
if (*eob) {
@@ -1116,17 +1038,14 @@
xd->lossless[mbmi->segment_id]);
}
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
- if (*eob)
- *(args->skip) = 0;
+ if (*eob) *(args->skip) = 0;
}
void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
const MACROBLOCKD *const xd = &x->e_mbd;
- struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip};
+ struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip };
vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
vp10_encode_block_intra, &arg);
diff --git a/vp10/encoder/encodemb.h b/vp10/encoder/encodemb.h
index b3e6f63..4d1c826 100644
--- a/vp10/encoder/encodemb.h
+++ b/vp10/encoder/encodemb.h
@@ -25,21 +25,18 @@
};
void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
+void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg);
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
+ void *arg);
void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
diff --git a/vp10/encoder/encodemv.c b/vp10/encoder/encodemv.c
index 0736c65..ef670f5 100644
--- a/vp10/encoder/encodemv.c
+++ b/vp10/encoder/encodemv.c
@@ -31,15 +31,15 @@
vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
}
-static void encode_mv_component(vpx_writer* w, int comp,
- const nmv_component* mvcomp, int usehp) {
+static void encode_mv_component(vpx_writer *w, int comp,
+ const nmv_component *mvcomp, int usehp) {
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
const int mv_class = vp10_get_mv_class(mag - 1, &offset);
- const int d = offset >> 3; // int mv data
- const int fr = (offset >> 1) & 3; // fractional mv data
- const int hp = offset & 1; // high precision mv data
+  const int d = offset >> 3;         // int mv data
+  const int fr = (offset >> 1) & 3;  // fractional mv data
+  const int hp = offset & 1;         // high precision mv data
assert(comp != 0);
@@ -48,33 +48,30 @@
// Class
vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
- &mv_class_encodings[mv_class]);
+ &mv_class_encodings[mv_class]);
// Integer bits
if (mv_class == MV_CLASS_0) {
vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
- &mv_class0_encodings[d]);
+ &mv_class0_encodings[d]);
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
- for (i = 0; i < n; ++i)
- vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
+ for (i = 0; i < n; ++i) vpx_write(w, (d >> i) & 1, mvcomp->bits[i]);
}
// Fractional bits
vp10_write_token(w, vp10_mv_fp_tree,
- mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
- &mv_fp_encodings[fr]);
+ mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
+ &mv_fp_encodings[fr]);
// High precision bit
if (usehp)
- vpx_write(w, hp,
- mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
+ vpx_write(w, hp, mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp);
}
-
static void build_nmv_component_cost_table(int *mvcost,
- const nmv_component* const mvcomp,
+ const nmv_component *const mvcomp,
int usehp) {
int i, v;
int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
@@ -107,16 +104,15 @@
z = v - 1;
c = vp10_get_mv_class(z, &o);
cost += class_cost[c];
- d = (o >> 3); /* int mv data */
- f = (o >> 1) & 3; /* fractional pel mv data */
- e = (o & 1); /* high precision mv data */
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
if (c == MV_CLASS_0) {
cost += class0_cost[d];
} else {
int i, b;
- b = c + CLASS0_BITS - 1; /* number of bits */
- for (i = 0; i < b; ++i)
- cost += bits_cost[i][((d >> i) & 1)];
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i) cost += bits_cost[i][((d >> i) & 1)];
}
if (c == MV_CLASS_0) {
cost += class0_fp_cost[d][f];
@@ -138,7 +134,7 @@
static void update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
vpx_prob upd_p) {
#if CONFIG_MISC_FIXES
- (void) upd_p;
+ (void)upd_p;
vp10_cond_prob_diff_update(w, cur_p, ct);
#else
const vpx_prob new_p = get_binary_prob(ct[0], ct[1]) | 1;
@@ -154,8 +150,8 @@
static void write_mv_update(const vpx_tree_index *tree,
vpx_prob probs[/*n - 1*/],
- const unsigned int counts[/*n - 1*/],
- int n, vpx_writer *w) {
+ const unsigned int counts[/*n - 1*/], int n,
+ vpx_writer *w) {
int i;
unsigned int branch_ct[32][2];
@@ -168,11 +164,12 @@
}
void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
- nmv_context_counts *const counts) {
+ nmv_context_counts *const counts) {
int i, j;
nmv_context *const mvc = &cm->fc->nmvc;
- write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
+ write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+ w);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
@@ -205,15 +202,14 @@
}
}
-void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w,
- const MV* mv, const MV* ref,
- const nmv_context* mvctx, int usehp) {
- const MV diff = {mv->row - ref->row,
- mv->col - ref->col};
+void vp10_encode_mv(VP10_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp) {
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
usehp = usehp && vp10_use_mv_hp(ref);
- vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
+ vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
+ &mv_joint_encodings[j]);
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
@@ -229,21 +225,20 @@
}
void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context* ctx, int usehp) {
+ const nmv_context *ctx, int usehp) {
vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
}
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
- const int_mv mvs[2],
- nmv_context_counts *counts) {
+ const int_mv mvs[2], nmv_context_counts *counts) {
int i;
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
- const MV diff = {mvs[i].as_mv.row - ref->row,
- mvs[i].as_mv.col - ref->col};
+ const MV diff = { mvs[i].as_mv.row - ref->row,
+ mvs[i].as_mv.col - ref->col };
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
}
@@ -267,8 +262,6 @@
}
}
} else {
- if (mbmi->mode == NEWMV)
- inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
+ if (mbmi->mode == NEWMV) inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
}
}
-
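(Aside on the d/fr/hp split in encode_mv_component() above: the within-class offset is stored in 1/8-pel units, so bit 0 is the high-precision bit, bits 1-2 hold the quarter-pel fraction, and the remaining high bits hold the integer-pel magnitude. A standalone round-trip check using only standard C:)

#include <assert.h>
#include <stdio.h>

int main(void) {
  const int offset = 45;            /* 0b101101: 5 int-pel, 2 quarter-pel, 1 hp */
  const int d = offset >> 3;        /* integer part: 5 */
  const int fr = (offset >> 1) & 3; /* fractional part: 2 */
  const int hp = offset & 1;        /* high-precision bit: 1 */
  assert(((d << 3) | (fr << 1) | hp) == offset); /* the split is lossless */
  printf("d=%d fr=%d hp=%d\n", d, fr, hp);
  return 0;
}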
diff --git a/vp10/encoder/encodemv.h b/vp10/encoder/encodemv.h
index 006f6d7..6187488 100644
--- a/vp10/encoder/encodemv.h
+++ b/vp10/encoder/encodemv.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_ENCODEMV_H_
#define VP10_ENCODER_ENCODEMV_H_
@@ -21,13 +20,13 @@
void vp10_entropy_mv_init(void);
void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
- nmv_context_counts *const counts);
+ nmv_context_counts *const counts);
-void vp10_encode_mv(VP10_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref,
- const nmv_context* mvctx, int usehp);
+void vp10_encode_mv(VP10_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp);
void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
- const nmv_context* mvctx, int usehp);
+ const nmv_context *mvctx, int usehp);
void vp10_update_mv_count(ThreadData *td);
diff --git a/vp10/encoder/encoder.c b/vp10/encoder/encoder.c
index 91c7e43..af68409 100644
--- a/vp10/encoder/encoder.c
+++ b/vp10/encoder/encoder.c
@@ -58,14 +58,14 @@
#define AM_SEGMENT_ID_INACTIVE 7
#define AM_SEGMENT_ID_ACTIVE 0
-#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
+#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
-#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv
- // for altref computation.
-#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision
- // mv. Choose a very high value for
- // now so that HIGH_PRECISION is always
- // chosen.
+#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv
+ // for altref computation.
+#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision
+ // mv. Choose a very high value for
+ // now so that HIGH_PRECISION is always
+ // chosen.
// #define OUTPUT_YUV_REC
#ifdef OUTPUT_YUV_DENOISED
@@ -97,15 +97,15 @@
case THREEFIVE:
*hr = 3;
*hs = 5;
- break;
+ break;
case ONETWO:
*hr = 1;
*hs = 2;
- break;
+ break;
default:
*hr = 1;
*hs = 1;
- assert(0);
+ assert(0);
break;
}
}
@@ -143,8 +143,8 @@
vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
// Setting the data to -MAX_LOOP_FILTER will result in the computed loop
// filter level being zero regardless of the value of seg->abs_delta.
- vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE,
- SEG_LVL_ALT_LF, -MAX_LOOP_FILTER);
+ vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+ -MAX_LOOP_FILTER);
} else {
vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
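(Aside on the -MAX_LOOP_FILTER segment delta above: the per-segment filter level is clamped to [0, MAX_LOOP_FILTER] after the segment data is applied, whether as an absolute value or as a delta, and since the base level never exceeds MAX_LOOP_FILTER, base - MAX_LOOP_FILTER can never be positive. A toy check of the delta case, assuming MAX_LOOP_FILTER is 63 as in VP9:)

#include <stdio.h>

#define MAX_LOOP_FILTER 63

static int clamp_int(int v, int lo, int hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

int main(void) {
  int base;
  for (base = 0; base <= MAX_LOOP_FILTER; base += 21)
    printf("base=%2d -> level=%d\n", base,
           clamp_int(base - MAX_LOOP_FILTER, 0, MAX_LOOP_FILTER)); /* all 0 */
  return 0;
}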
@@ -157,10 +157,8 @@
}
}
-int vp10_set_active_map(VP10_COMP* cpi,
- unsigned char* new_map_16x16,
- int rows,
- int cols) {
+int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
unsigned char *const active_map_8x8 = cpi->active_map.map;
const int mi_rows = cpi->common.mi_rows;
@@ -186,13 +184,11 @@
}
}
-int vp10_get_active_map(VP10_COMP* cpi,
- unsigned char* new_map_16x16,
- int rows,
- int cols) {
+int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+ int cols) {
if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
new_map_16x16) {
- unsigned char* const seg_map_8x8 = cpi->segmentation_map;
+ unsigned char *const seg_map_8x8 = cpi->segmentation_map;
const int mi_rows = cpi->common.mi_rows;
const int mi_cols = cpi->common.mi_cols;
memset(new_map_16x16, !cpi->active_map.enabled, rows * cols);
@@ -268,19 +264,16 @@
static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
- if (!cm->mip)
- return 1;
+ if (!cm->mip) return 1;
cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip));
- if (!cm->prev_mip)
- return 1;
+ if (!cm->prev_mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->mi_grid_base)
- return 1;
- cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*));
- if (!cm->prev_mi_grid_base)
- return 1;
+ cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->mi_grid_base) return 1;
+ cm->prev_mi_grid_base =
+ (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ if (!cm->prev_mi_grid_base) return 1;
return 0;
}
@@ -399,7 +392,7 @@
// restored with a call to vp10_restore_coding_context. These functions are
// intended for use in a re-code loop in vp10_compress_frame where the
// quantizer value is adjusted between loop iterations.
- vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+ vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
@@ -414,8 +407,8 @@
vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs);
#endif
- memcpy(cpi->coding_context.last_frame_seg_map_copy,
- cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
+ memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
+ (cm->mi_rows * cm->mi_cols));
vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
@@ -442,8 +435,7 @@
vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs);
#endif
- memcpy(cm->last_frame_seg_map,
- cpi->coding_context.last_frame_seg_map_copy,
+ memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
@@ -495,8 +487,8 @@
seg->update_map = 1;
seg->update_data = 1;
- qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875,
- cm->bit_depth);
+ qi_delta =
+ vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
@@ -518,7 +510,7 @@
seg->abs_delta = SEGMENT_DELTADATA;
qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
- cm->bit_depth);
+ cm->bit_depth);
vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
@@ -598,18 +590,17 @@
if (!cpi->lookahead)
cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
- cm->subsampling_x, cm->subsampling_y,
+ cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
- cm->use_highbitdepth,
+ cm->use_highbitdepth,
#endif
- oxcf->lag_in_frames);
+ oxcf->lag_in_frames);
if (!cpi->lookahead)
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate lag buffers");
// TODO(agrange) Check if ARF is enabled and skip allocation if not.
- if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer,
- oxcf->width, oxcf->height,
+ if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -622,8 +613,7 @@
static void alloc_util_frame_buffers(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
- if (vpx_realloc_frame_buffer(&cpi->last_frame_uf,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -633,8 +623,7 @@
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate last frame buffer");
- if (vpx_realloc_frame_buffer(&cpi->scaled_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -644,8 +633,7 @@
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate scaled source buffer");
- if (vpx_realloc_frame_buffer(&cpi->scaled_last_source,
- cm->width, cm->height,
+ if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
@@ -656,14 +644,12 @@
"Failed to allocate scaled last source buffer");
}
-
static int alloc_context_buffers_ext(VP10_COMP *cpi) {
VP10_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
- if (!cpi->mbmi_ext_base)
- return 1;
+ if (!cpi->mbmi_ext_base) return 1;
return 0;
}
@@ -680,7 +666,7 @@
{
unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols);
CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0],
- vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
+ vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
}
vp10_setup_pc_tree(&cpi->common, &cpi->td);
@@ -697,8 +683,8 @@
int min_log2_tile_cols, max_log2_tile_cols;
vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
- cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns,
- min_log2_tile_cols, max_log2_tile_cols);
+ cm->log2_tile_cols =
+ clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
cm->log2_tile_rows = cpi->oxcf.tile_rows;
}
@@ -759,154 +745,122 @@
const int64_t maximum = oxcf->maximum_buffer_size_ms;
rc->starting_buffer_level = starting * bandwidth / 1000;
- rc->optimal_buffer_level = (optimal == 0) ? bandwidth / 8
- : optimal * bandwidth / 1000;
- rc->maximum_buffer_size = (maximum == 0) ? bandwidth / 8
- : maximum * bandwidth / 1000;
+ rc->optimal_buffer_level =
+ (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000;
+ rc->maximum_buffer_size =
+ (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
}
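(Aside: the arithmetic above converts buffer durations in milliseconds into bits at the target bandwidth, with a zero optimal/maximum setting falling back to bandwidth / 8, i.e. 125 ms worth of data. A toy recomputation; the bits-per-second unit is assumed from the / 1000 scaling:)

#include <stdio.h>

int main(void) {
  const long long bandwidth = 800000; /* target bandwidth, bits/s (assumed) */
  const long long starting_ms = 4000, optimal_ms = 0;
  const long long starting_level = starting_ms * bandwidth / 1000;
  const long long optimal_level =
      (optimal_ms == 0) ? bandwidth / 8 : optimal_ms * bandwidth / 1000;
  /* prints 3200000 and 100000 respectively */
  printf("starting=%lld optimal=%lld\n", starting_level, optimal_level);
  return 0;
}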
#if CONFIG_VPX_HIGHBITDEPTH
#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
- cpi->fn_ptr[BT].sdf = SDF; \
- cpi->fn_ptr[BT].sdaf = SDAF; \
- cpi->fn_ptr[BT].vf = VF; \
- cpi->fn_ptr[BT].svf = SVF; \
- cpi->fn_ptr[BT].svaf = SVAF; \
- cpi->fn_ptr[BT].sdx3f = SDX3F; \
- cpi->fn_ptr[BT].sdx8f = SDX8F; \
- cpi->fn_ptr[BT].sdx4df = SDX4DF;
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].sdaf = SDAF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
-#define MAKE_BFP_SAD_WRAPPER(fnname) \
-static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
-}
+#define MAKE_BFP_SAD_WRAPPER(fnname) \
+ static unsigned int fnname##_bits8(const uint8_t *src_ptr, \
+ int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \
+ } \
+ static unsigned int fnname##_bits10( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \
+ } \
+ static unsigned int fnname##_bits12( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \
+ }
-#define MAKE_BFP_SADAVG_WRAPPER(fnname) static unsigned int \
-fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
-} \
-static unsigned int fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
- second_pred) >> 2; \
-} \
-static unsigned int fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- const uint8_t *second_pred) { \
- return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \
- second_pred) >> 4; \
-}
+#define MAKE_BFP_SADAVG_WRAPPER(fnname) \
+ static unsigned int fnname##_bits8( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \
+ } \
+ static unsigned int fnname##_bits10( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+ 2; \
+ } \
+ static unsigned int fnname##_bits12( \
+ const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \
+ int ref_stride, const uint8_t *second_pred) { \
+ return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \
+ 4; \
+ }
-#define MAKE_BFP_SAD3_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 3; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 3; i++) \
- sad_array[i] >>= 4; \
-}
+#define MAKE_BFP_SAD3_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 3; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 3; i++) sad_array[i] >>= 4; \
+ }
-#define MAKE_BFP_SAD8_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 8; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 8; i++) \
- sad_array[i] >>= 4; \
-}
-#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
-static void fnname##_bits8(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
-} \
-static void fnname##_bits10(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 4; i++) \
- sad_array[i] >>= 2; \
-} \
-static void fnname##_bits12(const uint8_t *src_ptr, \
- int source_stride, \
- const uint8_t* const ref_ptr[], \
- int ref_stride, \
- unsigned int *sad_array) { \
- int i; \
- fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
- for (i = 0; i < 4; i++) \
- sad_array[i] >>= 4; \
-}
+#define MAKE_BFP_SAD8_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 8; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *ref_ptr, int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 8; i++) sad_array[i] >>= 4; \
+ }
+#define MAKE_BFP_SAD4D_WRAPPER(fnname) \
+ static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ } \
+ static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 4; i++) sad_array[i] >>= 2; \
+ } \
+ static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \
+ const uint8_t *const ref_ptr[], int ref_stride, \
+ unsigned int *sad_array) { \
+ int i; \
+ fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \
+ for (i = 0; i < 4; i++) sad_array[i] >>= 4; \
+ }
+/* clang-format off */
MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16)
MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d)
@@ -962,410 +916,269 @@
MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3)
MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8)
MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d)
+/* clang-format on */
-static void highbd_set_var_fns(VP10_COMP *const cpi) {
+static void highbd_set_var_fns(VP10_COMP *const cpi) {
VP10_COMMON *const cm = &cpi->common;
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
case VPX_BITS_8:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits8,
- vpx_highbd_sad32x16_avg_bits8,
- vpx_highbd_8_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8,
+ vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16,
vpx_highbd_8_sub_pixel_variance32x16,
- vpx_highbd_8_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits8)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits8,
- vpx_highbd_sad16x32_avg_bits8,
- vpx_highbd_8_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8,
+ vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32,
vpx_highbd_8_sub_pixel_variance16x32,
- vpx_highbd_8_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits8,
- vpx_highbd_sad64x32_avg_bits8,
- vpx_highbd_8_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8,
+ vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32,
vpx_highbd_8_sub_pixel_variance64x32,
- vpx_highbd_8_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits8)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits8,
- vpx_highbd_sad32x64_avg_bits8,
- vpx_highbd_8_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8,
+ vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64,
vpx_highbd_8_sub_pixel_variance32x64,
- vpx_highbd_8_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits8)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits8,
- vpx_highbd_sad32x32_avg_bits8,
- vpx_highbd_8_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8,
+ vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32,
vpx_highbd_8_sub_pixel_variance32x32,
vpx_highbd_8_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits8,
- vpx_highbd_sad32x32x8_bits8,
+ vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8,
vpx_highbd_sad32x32x4d_bits8)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits8,
- vpx_highbd_sad64x64_avg_bits8,
- vpx_highbd_8_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8,
+ vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64,
vpx_highbd_8_sub_pixel_variance64x64,
vpx_highbd_8_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits8,
- vpx_highbd_sad64x64x8_bits8,
+ vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8,
vpx_highbd_sad64x64x4d_bits8)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits8,
- vpx_highbd_sad16x16_avg_bits8,
- vpx_highbd_8_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8,
+ vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16,
vpx_highbd_8_sub_pixel_variance16x16,
vpx_highbd_8_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits8,
- vpx_highbd_sad16x16x8_bits8,
+ vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8,
vpx_highbd_sad16x16x4d_bits8)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits8,
- vpx_highbd_sad16x8_avg_bits8,
- vpx_highbd_8_variance16x8,
- vpx_highbd_8_sub_pixel_variance16x8,
- vpx_highbd_8_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits8,
- vpx_highbd_sad16x8x8_bits8,
- vpx_highbd_sad16x8x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8,
+ vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8,
+ vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8,
+ vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits8,
- vpx_highbd_sad8x16_avg_bits8,
- vpx_highbd_8_variance8x16,
- vpx_highbd_8_sub_pixel_variance8x16,
- vpx_highbd_8_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits8,
- vpx_highbd_sad8x16x8_bits8,
- vpx_highbd_sad8x16x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8,
+ vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16,
+ vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8,
+ vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits8,
- vpx_highbd_sad8x8_avg_bits8,
- vpx_highbd_8_variance8x8,
- vpx_highbd_8_sub_pixel_variance8x8,
- vpx_highbd_8_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits8,
- vpx_highbd_sad8x8x8_bits8,
- vpx_highbd_sad8x8x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8,
+ vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8,
+ vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8,
+ vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits8,
- vpx_highbd_sad8x4_avg_bits8,
- vpx_highbd_8_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8,
+ vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4,
vpx_highbd_8_sub_pixel_variance8x4,
- vpx_highbd_8_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits8,
- vpx_highbd_sad8x4x4d_bits8)
+ vpx_highbd_8_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits8,
- vpx_highbd_sad4x8_avg_bits8,
- vpx_highbd_8_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8,
+ vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8,
vpx_highbd_8_sub_pixel_variance4x8,
- vpx_highbd_8_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits8,
- vpx_highbd_sad4x8x4d_bits8)
+ vpx_highbd_8_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits8,
- vpx_highbd_sad4x4_avg_bits8,
- vpx_highbd_8_variance4x4,
- vpx_highbd_8_sub_pixel_variance4x4,
- vpx_highbd_8_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits8,
- vpx_highbd_sad4x4x8_bits8,
- vpx_highbd_sad4x4x4d_bits8)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8,
+ vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4,
+ vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8,
+ vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8)
break;
case VPX_BITS_10:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits10,
- vpx_highbd_sad32x16_avg_bits10,
- vpx_highbd_10_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10,
+ vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16,
vpx_highbd_10_sub_pixel_variance32x16,
- vpx_highbd_10_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits10,
- vpx_highbd_sad16x32_avg_bits10,
- vpx_highbd_10_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10,
+ vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32,
vpx_highbd_10_sub_pixel_variance16x32,
- vpx_highbd_10_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits10,
- vpx_highbd_sad64x32_avg_bits10,
- vpx_highbd_10_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10,
+ vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32,
vpx_highbd_10_sub_pixel_variance64x32,
- vpx_highbd_10_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits10)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits10,
- vpx_highbd_sad32x64_avg_bits10,
- vpx_highbd_10_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10,
+ vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64,
vpx_highbd_10_sub_pixel_variance32x64,
- vpx_highbd_10_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits10)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits10,
- vpx_highbd_sad32x32_avg_bits10,
- vpx_highbd_10_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10,
+ vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32,
vpx_highbd_10_sub_pixel_variance32x32,
vpx_highbd_10_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits10,
- vpx_highbd_sad32x32x8_bits10,
+ vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10,
vpx_highbd_sad32x32x4d_bits10)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits10,
- vpx_highbd_sad64x64_avg_bits10,
- vpx_highbd_10_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10,
+ vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64,
vpx_highbd_10_sub_pixel_variance64x64,
vpx_highbd_10_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits10,
- vpx_highbd_sad64x64x8_bits10,
+ vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10,
vpx_highbd_sad64x64x4d_bits10)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits10,
- vpx_highbd_sad16x16_avg_bits10,
- vpx_highbd_10_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10,
+ vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16,
vpx_highbd_10_sub_pixel_variance16x16,
vpx_highbd_10_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits10,
- vpx_highbd_sad16x16x8_bits10,
+ vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10,
vpx_highbd_sad16x16x4d_bits10)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits10,
- vpx_highbd_sad16x8_avg_bits10,
- vpx_highbd_10_variance16x8,
+ HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10,
+ vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8,
vpx_highbd_10_sub_pixel_variance16x8,
vpx_highbd_10_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits10,
- vpx_highbd_sad16x8x8_bits10,
+ vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10,
vpx_highbd_sad16x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits10,
- vpx_highbd_sad8x16_avg_bits10,
- vpx_highbd_10_variance8x16,
+ HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10,
+ vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16,
vpx_highbd_10_sub_pixel_variance8x16,
vpx_highbd_10_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits10,
- vpx_highbd_sad8x16x8_bits10,
+ vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10,
vpx_highbd_sad8x16x4d_bits10)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits10,
- vpx_highbd_sad8x8_avg_bits10,
- vpx_highbd_10_variance8x8,
- vpx_highbd_10_sub_pixel_variance8x8,
- vpx_highbd_10_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits10,
- vpx_highbd_sad8x8x8_bits10,
- vpx_highbd_sad8x8x4d_bits10)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10,
+ vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8,
+ vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10,
+ vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits10,
- vpx_highbd_sad8x4_avg_bits10,
- vpx_highbd_10_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10,
+ vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4,
vpx_highbd_10_sub_pixel_variance8x4,
- vpx_highbd_10_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits10,
- vpx_highbd_sad8x4x4d_bits10)
+ vpx_highbd_10_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits10,
- vpx_highbd_sad4x8_avg_bits10,
- vpx_highbd_10_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10,
+ vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8,
vpx_highbd_10_sub_pixel_variance4x8,
- vpx_highbd_10_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits10,
- vpx_highbd_sad4x8x4d_bits10)
+ vpx_highbd_10_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits10,
- vpx_highbd_sad4x4_avg_bits10,
- vpx_highbd_10_variance4x4,
- vpx_highbd_10_sub_pixel_variance4x4,
- vpx_highbd_10_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits10,
- vpx_highbd_sad4x4x8_bits10,
- vpx_highbd_sad4x4x4d_bits10)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10,
+ vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4,
+ vpx_highbd_10_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits10,
+ vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10)
break;
case VPX_BITS_12:
- HIGHBD_BFP(BLOCK_32X16,
- vpx_highbd_sad32x16_bits12,
- vpx_highbd_sad32x16_avg_bits12,
- vpx_highbd_12_variance32x16,
+ HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12,
+ vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16,
vpx_highbd_12_sub_pixel_variance32x16,
- vpx_highbd_12_sub_pixel_avg_variance32x16,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL,
vpx_highbd_sad32x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X32,
- vpx_highbd_sad16x32_bits12,
- vpx_highbd_sad16x32_avg_bits12,
- vpx_highbd_12_variance16x32,
+ HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12,
+ vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32,
vpx_highbd_12_sub_pixel_variance16x32,
- vpx_highbd_12_sub_pixel_avg_variance16x32,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL,
vpx_highbd_sad16x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X32,
- vpx_highbd_sad64x32_bits12,
- vpx_highbd_sad64x32_avg_bits12,
- vpx_highbd_12_variance64x32,
+ HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12,
+ vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32,
vpx_highbd_12_sub_pixel_variance64x32,
- vpx_highbd_12_sub_pixel_avg_variance64x32,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL,
vpx_highbd_sad64x32x4d_bits12)
- HIGHBD_BFP(BLOCK_32X64,
- vpx_highbd_sad32x64_bits12,
- vpx_highbd_sad32x64_avg_bits12,
- vpx_highbd_12_variance32x64,
+ HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12,
+ vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64,
vpx_highbd_12_sub_pixel_variance32x64,
- vpx_highbd_12_sub_pixel_avg_variance32x64,
- NULL,
- NULL,
+ vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL,
vpx_highbd_sad32x64x4d_bits12)
- HIGHBD_BFP(BLOCK_32X32,
- vpx_highbd_sad32x32_bits12,
- vpx_highbd_sad32x32_avg_bits12,
- vpx_highbd_12_variance32x32,
+ HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12,
+ vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32,
vpx_highbd_12_sub_pixel_variance32x32,
vpx_highbd_12_sub_pixel_avg_variance32x32,
- vpx_highbd_sad32x32x3_bits12,
- vpx_highbd_sad32x32x8_bits12,
+ vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12,
vpx_highbd_sad32x32x4d_bits12)
- HIGHBD_BFP(BLOCK_64X64,
- vpx_highbd_sad64x64_bits12,
- vpx_highbd_sad64x64_avg_bits12,
- vpx_highbd_12_variance64x64,
+ HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12,
+ vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64,
vpx_highbd_12_sub_pixel_variance64x64,
vpx_highbd_12_sub_pixel_avg_variance64x64,
- vpx_highbd_sad64x64x3_bits12,
- vpx_highbd_sad64x64x8_bits12,
+ vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12,
vpx_highbd_sad64x64x4d_bits12)
- HIGHBD_BFP(BLOCK_16X16,
- vpx_highbd_sad16x16_bits12,
- vpx_highbd_sad16x16_avg_bits12,
- vpx_highbd_12_variance16x16,
+ HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12,
+ vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16,
vpx_highbd_12_sub_pixel_variance16x16,
vpx_highbd_12_sub_pixel_avg_variance16x16,
- vpx_highbd_sad16x16x3_bits12,
- vpx_highbd_sad16x16x8_bits12,
+ vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12,
vpx_highbd_sad16x16x4d_bits12)
- HIGHBD_BFP(BLOCK_16X8,
- vpx_highbd_sad16x8_bits12,
- vpx_highbd_sad16x8_avg_bits12,
- vpx_highbd_12_variance16x8,
+ HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12,
+ vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8,
vpx_highbd_12_sub_pixel_variance16x8,
vpx_highbd_12_sub_pixel_avg_variance16x8,
- vpx_highbd_sad16x8x3_bits12,
- vpx_highbd_sad16x8x8_bits12,
+ vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12,
vpx_highbd_sad16x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X16,
- vpx_highbd_sad8x16_bits12,
- vpx_highbd_sad8x16_avg_bits12,
- vpx_highbd_12_variance8x16,
+ HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12,
+ vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16,
vpx_highbd_12_sub_pixel_variance8x16,
vpx_highbd_12_sub_pixel_avg_variance8x16,
- vpx_highbd_sad8x16x3_bits12,
- vpx_highbd_sad8x16x8_bits12,
+ vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12,
vpx_highbd_sad8x16x4d_bits12)
- HIGHBD_BFP(BLOCK_8X8,
- vpx_highbd_sad8x8_bits12,
- vpx_highbd_sad8x8_avg_bits12,
- vpx_highbd_12_variance8x8,
- vpx_highbd_12_sub_pixel_variance8x8,
- vpx_highbd_12_sub_pixel_avg_variance8x8,
- vpx_highbd_sad8x8x3_bits12,
- vpx_highbd_sad8x8x8_bits12,
- vpx_highbd_sad8x8x4d_bits12)
+ HIGHBD_BFP(
+ BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12,
+ vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8,
+ vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12,
+ vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12)
- HIGHBD_BFP(BLOCK_8X4,
- vpx_highbd_sad8x4_bits12,
- vpx_highbd_sad8x4_avg_bits12,
- vpx_highbd_12_variance8x4,
+ HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12,
+ vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4,
vpx_highbd_12_sub_pixel_variance8x4,
- vpx_highbd_12_sub_pixel_avg_variance8x4,
- NULL,
- vpx_highbd_sad8x4x8_bits12,
- vpx_highbd_sad8x4x4d_bits12)
+ vpx_highbd_12_sub_pixel_avg_variance8x4, NULL,
+ vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12)
- HIGHBD_BFP(BLOCK_4X8,
- vpx_highbd_sad4x8_bits12,
- vpx_highbd_sad4x8_avg_bits12,
- vpx_highbd_12_variance4x8,
+ HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12,
+ vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8,
vpx_highbd_12_sub_pixel_variance4x8,
- vpx_highbd_12_sub_pixel_avg_variance4x8,
- NULL,
- vpx_highbd_sad4x8x8_bits12,
- vpx_highbd_sad4x8x4d_bits12)
+ vpx_highbd_12_sub_pixel_avg_variance4x8, NULL,
+ vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12)
- HIGHBD_BFP(BLOCK_4X4,
- vpx_highbd_sad4x4_bits12,
- vpx_highbd_sad4x4_avg_bits12,
- vpx_highbd_12_variance4x4,
- vpx_highbd_12_sub_pixel_variance4x4,
- vpx_highbd_12_sub_pixel_avg_variance4x4,
- vpx_highbd_sad4x4x3_bits12,
- vpx_highbd_sad4x4x8_bits12,
- vpx_highbd_sad4x4x4d_bits12)
+ HIGHBD_BFP(
+ BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12,
+ vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4,
+ vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12,
+ vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12)
break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
}
}
}
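(Aside on the _bits10/_bits12 wrappers wired up above: SAD scales linearly with sample amplitude, so SADs computed on 10-bit data are about 4x, and on 12-bit data about 16x, their 8-bit equivalents; the >> 2 and >> 4 shifts in the wrapper macros put every bit depth's distortion back on a common 8-bit scale. A toy demonstration, not the libvpx API:)

#include <stdio.h>
#include <stdlib.h>

/* SAD over 4 samples; stand-in for the vpx_highbd_sad* kernels. */
static unsigned sad4(const int *a, const int *b) {
  unsigned s = 0;
  int i;
  for (i = 0; i < 4; i++) s += (unsigned)abs(a[i] - b[i]);
  return s;
}

int main(void) {
  const int src8[4] = { 10, 20, 30, 40 };
  const int ref8[4] = { 12, 18, 33, 35 };
  int src10[4], ref10[4], i;
  for (i = 0; i < 4; i++) {
    src10[i] = src8[i] << 2; /* same picture, widened to 10-bit range */
    ref10[i] = ref8[i] << 2;
  }
  printf("8-bit SAD       = %u\n", sad4(src8, ref8));         /* 12 */
  printf("10-bit SAD >> 2 = %u\n", sad4(src10, ref10) >> 2);  /* 12 again */
  return 0;
}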
@@ -1380,8 +1193,7 @@
vpx_calloc(cm->mi_rows * cm->mi_cols, 1));
// Create a map used for cyclic background refresh.
- if (cpi->cyclic_refresh)
- vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+ if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
@@ -1401,8 +1213,7 @@
VP10_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
- if (cm->profile != oxcf->profile)
- cm->profile = oxcf->profile;
+ if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
cm->color_space = oxcf->color_space;
cm->color_range = oxcf->color_range;
@@ -1425,10 +1236,11 @@
cpi->refresh_golden_frame = 0;
cpi->refresh_last_frame = 1;
- cm->refresh_frame_context =
- oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
- oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
+ cm->refresh_frame_context = oxcf->error_resilient_mode
+ ? REFRESH_FRAME_CONTEXT_OFF
+ : oxcf->frame_parallel_decoding_mode
+ ? REFRESH_FRAME_CONTEXT_FORWARD
+ : REFRESH_FRAME_CONTEXT_BACKWARD;
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
vp10_reset_segment_features(cm);
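(Aside: the nested ternary above encodes a small decision table — error-resilient streams never refresh the frame context, frame-parallel decoding refreshes it forward-only, and everything else refreshes backward. Restated as a standalone helper; the enum values here are illustrative stand-ins, not the real REFRESH_FRAME_CONTEXT_* definitions:)

#include <stdio.h>

enum refresh_ctx { CTX_OFF, CTX_FORWARD, CTX_BACKWARD };

static enum refresh_ctx pick(int error_resilient, int frame_parallel) {
  if (error_resilient) return CTX_OFF;
  return frame_parallel ? CTX_FORWARD : CTX_BACKWARD;
}

int main(void) {
  printf("%d %d %d\n", pick(1, 0), pick(0, 1), pick(0, 0)); /* 0 1 2 */
  return 0;
}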
@@ -1500,7 +1312,7 @@
#ifndef M_LOG2_E
#define M_LOG2_E 0.693147180559945309417
#endif
-#define log2f(x) (log (x) / (float) M_LOG2_E)
+#define log2f(x) (log(x) / (float)M_LOG2_E)
static void cal_nmvjointsadcost(int *mvjointsadcost) {
mvjointsadcost[0] = 600;
@@ -1539,15 +1351,13 @@
} while (++i <= MV_MAX);
}
-
VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
- BufferPool *const pool) {
+ BufferPool *const pool) {
unsigned int i;
VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP));
VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
- if (!cm)
- return NULL;
+ if (!cm) return NULL;
vp10_zero(*cpi);
@@ -1562,11 +1372,10 @@
cm->free_mi = vp10_enc_free_mi;
cm->setup_mi = vp10_enc_setup_mi;
- CHECK_MEM_ERROR(cm, cm->fc,
- (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
- CHECK_MEM_ERROR(cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
- sizeof(*cm->frame_contexts)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(
+ cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
cpi->resize_state = 0;
cpi->resize_avg_qp = 0;
@@ -1599,11 +1408,11 @@
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1],
vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1])));
- for (i = 0; i < (sizeof(cpi->mbgraph_stats) /
- sizeof(cpi->mbgraph_stats[0])); i++) {
- CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats,
- vpx_calloc(cm->MBs *
- sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
+ for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]));
+ i++) {
+ CHECK_MEM_ERROR(
+ cm, cpi->mbgraph_stats[i].mb_stats,
+ vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1));
}
#if CONFIG_FP_MB_STATS
@@ -1647,7 +1456,7 @@
}
if (cpi->b_calculate_ssimg) {
- cpi->ssimg.worst= 100.0;
+ cpi->ssimg.worst = 100.0;
}
cpi->fastssim.worst = 100.0;
@@ -1659,8 +1468,8 @@
}
if (cpi->b_calculate_consistency) {
- cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) *
- 4 * cpi->common.mi_rows * cpi->common.mi_cols);
+ cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) * 4 *
+ cpi->common.mi_rows * cpi->common.mi_cols);
cpi->worst_consistency = 100.0;
}
@@ -1725,79 +1534,71 @@
vp10_set_speed_features_framesize_dependent(cpi);
// Allocate memory to store variances for a frame.
- CHECK_MEM_ERROR(cm, cpi->source_diff_var,
- vpx_calloc(cm->MBs, sizeof(diff)));
+ CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff)));
cpi->source_var_thresh = 0;
cpi->frames_till_next_var_check = 0;
-#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF)\
- cpi->fn_ptr[BT].sdf = SDF; \
- cpi->fn_ptr[BT].sdaf = SDAF; \
- cpi->fn_ptr[BT].vf = VF; \
- cpi->fn_ptr[BT].svf = SVF; \
- cpi->fn_ptr[BT].svaf = SVAF; \
- cpi->fn_ptr[BT].sdx3f = SDX3F; \
- cpi->fn_ptr[BT].sdx8f = SDX8F; \
- cpi->fn_ptr[BT].sdx4df = SDX4DF;
+#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].sdaf = SDAF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svaf = SVAF; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
- BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg,
- vpx_variance32x16, vpx_sub_pixel_variance32x16,
- vpx_sub_pixel_avg_variance32x16, NULL, NULL, vpx_sad32x16x4d)
+ BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16,
+ vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL,
+ vpx_sad32x16x4d)
- BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg,
- vpx_variance16x32, vpx_sub_pixel_variance16x32,
- vpx_sub_pixel_avg_variance16x32, NULL, NULL, vpx_sad16x32x4d)
+ BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32,
+ vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL,
+ vpx_sad16x32x4d)
- BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg,
- vpx_variance64x32, vpx_sub_pixel_variance64x32,
- vpx_sub_pixel_avg_variance64x32, NULL, NULL, vpx_sad64x32x4d)
+ BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32,
+ vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL,
+ vpx_sad64x32x4d)
- BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg,
- vpx_variance32x64, vpx_sub_pixel_variance32x64,
- vpx_sub_pixel_avg_variance32x64, NULL, NULL, vpx_sad32x64x4d)
+ BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64,
+ vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL,
+ vpx_sad32x64x4d)
- BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg,
- vpx_variance32x32, vpx_sub_pixel_variance32x32,
- vpx_sub_pixel_avg_variance32x32, vpx_sad32x32x3, vpx_sad32x32x8,
- vpx_sad32x32x4d)
+ BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32,
+ vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32,
+ vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d)
- BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg,
- vpx_variance64x64, vpx_sub_pixel_variance64x64,
- vpx_sub_pixel_avg_variance64x64, vpx_sad64x64x3, vpx_sad64x64x8,
- vpx_sad64x64x4d)
+ BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64,
+ vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64,
+ vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d)
- BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg,
- vpx_variance16x16, vpx_sub_pixel_variance16x16,
- vpx_sub_pixel_avg_variance16x16, vpx_sad16x16x3, vpx_sad16x16x8,
- vpx_sad16x16x4d)
+ BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16,
+ vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16,
+ vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d)
- BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg,
- vpx_variance16x8, vpx_sub_pixel_variance16x8,
- vpx_sub_pixel_avg_variance16x8,
- vpx_sad16x8x3, vpx_sad16x8x8, vpx_sad16x8x4d)
+ BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8,
+ vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3,
+ vpx_sad16x8x8, vpx_sad16x8x4d)
- BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg,
- vpx_variance8x16, vpx_sub_pixel_variance8x16,
- vpx_sub_pixel_avg_variance8x16,
- vpx_sad8x16x3, vpx_sad8x16x8, vpx_sad8x16x4d)
+ BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16,
+ vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3,
+ vpx_sad8x16x8, vpx_sad8x16x4d)
- BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg,
- vpx_variance8x8, vpx_sub_pixel_variance8x8,
- vpx_sub_pixel_avg_variance8x8,
- vpx_sad8x8x3, vpx_sad8x8x8, vpx_sad8x8x4d)
+ BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8,
+ vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3,
+ vpx_sad8x8x8, vpx_sad8x8x4d)
- BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg,
- vpx_variance8x4, vpx_sub_pixel_variance8x4,
- vpx_sub_pixel_avg_variance8x4, NULL, vpx_sad8x4x8, vpx_sad8x4x4d)
+ BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4,
+ vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL,
+ vpx_sad8x4x8, vpx_sad8x4x4d)
- BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg,
- vpx_variance4x8, vpx_sub_pixel_variance4x8,
- vpx_sub_pixel_avg_variance4x8, NULL, vpx_sad4x8x8, vpx_sad4x8x4d)
+ BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8,
+ vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL,
+ vpx_sad4x8x8, vpx_sad4x8x4d)
- BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg,
- vpx_variance4x4, vpx_sub_pixel_variance4x4,
- vpx_sub_pixel_avg_variance4x4,
- vpx_sad4x4x3, vpx_sad4x4x8, vpx_sad4x4x4d)
+ BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4,
+ vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3,
+ vpx_sad4x4x8, vpx_sad4x4x4d)
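Each BFP() invocation above populates one per-block-size entry of cpi->fn_ptr; the x3/x8 slots are legitimately NULL where no multi-candidate SAD kernel exists for that size. A hedged sketch of dispatching through the table, assuming the usual vpx_dsp signatures (SAD returns the sum; variance also writes *sse):

  unsigned int sse;
  /* Full-pel SAD and variance for one 16x16 block via the table: */
  const unsigned int sad =
      cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride, ref, ref_stride);
  const unsigned int var =
      cpi->fn_ptr[BLOCK_16X16].vf(src, src_stride, ref, ref_stride, &sse);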
#if CONFIG_VPX_HIGHBITDEPTH
highbd_set_var_fns(cpi);
@@ -1816,8 +1617,7 @@
return cpi;
}
-#define SNPRINT(H, T) \
- snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
+#define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T))
#define SNPRINT2(H, T, V) \
snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
@@ -1827,8 +1627,7 @@
unsigned int i;
int t;
- if (!cpi)
- return;
+ if (!cpi) return;
cm = &cpi->common;
if (cm->current_video_frame > 0) {
@@ -1836,28 +1635,27 @@
vpx_clear_system_state();
if (cpi->oxcf.pass != 1) {
- char headings[512] = {0};
- char results[512] = {0};
+ char headings[512] = { 0 };
+ char results[512] = { 0 };
FILE *f = fopen("opsnr.stt", "a");
- double time_encoded = (cpi->last_end_time_stamp_seen
- - cpi->first_time_stamp_ever) / 10000000.000;
- double total_encode_time = (cpi->time_receive_data +
- cpi->time_compress_data) / 1000.000;
+ double time_encoded =
+ (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) /
+ 10000000.000;
+ double total_encode_time =
+ (cpi->time_receive_data + cpi->time_compress_data) / 1000.000;
const double dr =
- (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded;
+ (double)cpi->bytes * (double)8 / (double)1000 / time_encoded;
const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
if (cpi->b_calculate_psnr) {
- const double total_psnr =
- vpx_sse_to_psnr((double)cpi->total_samples, peak,
- (double)cpi->total_sq_error);
- const double totalp_psnr =
- vpx_sse_to_psnr((double)cpi->totalp_samples, peak,
- (double)cpi->totalp_sq_error);
- const double total_ssim = 100 * pow(cpi->summed_quality /
- cpi->summed_weights, 8.0);
- const double totalp_ssim = 100 * pow(cpi->summedp_quality /
- cpi->summedp_weights, 8.0);
+ const double total_psnr = vpx_sse_to_psnr(
+ (double)cpi->total_samples, peak, (double)cpi->total_sq_error);
+ const double totalp_psnr = vpx_sse_to_psnr(
+ (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error);
+ const double total_ssim =
+ 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0);
+ const double totalp_ssim =
+ 100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0);
snprintf(headings, sizeof(headings),
"Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t"
@@ -1868,12 +1666,10 @@
"%7.3f\t%7.3f\t%7.3f\t%7.3f\t"
"%7.3f\t%7.3f\t%7.3f\t%7.3f",
dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr,
- cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr,
- total_ssim, totalp_ssim,
- cpi->fastssim.stat[ALL] / cpi->count,
- cpi->psnrhvs.stat[ALL] / cpi->count,
- cpi->psnr.worst, cpi->worst_ssim, cpi->fastssim.worst,
- cpi->psnrhvs.worst);
+ cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim,
+ totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count,
+ cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst,
+ cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst);
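vpx_sse_to_psnr converts an accumulated sum of squared errors to decibels using the standard definition, 10 * log10(samples * peak^2 / sse). A self-contained sketch of that conversion (the cap for the sse == 0 case is an assumed value; the library clamps lossless content to a finite maximum):

  #include <math.h>

  static double sse_to_psnr(double samples, double peak, double sse) {
    const double kMaxPsnr = 100.0; /* assumed cap for the lossless case */
    if (sse > 0.0) {
      const double psnr = 10.0 * log10(samples * peak * peak / sse);
      return psnr > kMaxPsnr ? kMaxPsnr : psnr;
    }
    return kMaxPsnr;
  }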
if (cpi->b_calculate_blockiness) {
SNPRINT(headings, "\t Block\tWstBlck");
@@ -1935,13 +1731,12 @@
vpx_free(cpi->tile_thr_data);
vpx_free(cpi->workers);
- if (cpi->num_workers > 1)
- vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+ if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
dealloc_compressor_data(cpi);
- for (i = 0; i < sizeof(cpi->mbgraph_stats) /
- sizeof(cpi->mbgraph_stats[0]); ++i) {
+ for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]);
+ ++i) {
vpx_free(cpi->mbgraph_stats[i].mb_stats);
}
@@ -1980,9 +1775,9 @@
/* TODO(yaowu): The block_variance calls the unoptimized versions of variance()
* and highbd_8_variance(). It should not.
*/
-static void encoder_variance(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+static void encoder_variance(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int w, int h, unsigned int *sse,
+ int *sum) {
int i, j;
*sum = 0;
@@ -2001,10 +1796,9 @@
}
#if CONFIG_VPX_HIGHBITDEPTH
-static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h, uint64_t *sse,
- uint64_t *sum) {
+static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w,
+ int h, uint64_t *sse, uint64_t *sum) {
int i, j;
uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -2023,22 +1817,20 @@
}
}
-static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int w, int h,
- unsigned int *sse, int *sum) {
+static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride,
+ const uint8_t *b8, int b_stride, int w,
+ int h, unsigned int *sse, int *sum) {
uint64_t sse_long = 0;
uint64_t sum_long = 0;
- encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h,
- &sse_long, &sum_long);
+ encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long,
+ &sum_long);
*sse = (unsigned int)sse_long;
*sum = (int)sum_long;
}
#endif // CONFIG_VPX_HIGHBITDEPTH
-static int64_t get_sse(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int width, int height) {
+static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int width, int height) {
const int dw = width % 16;
const int dh = height % 16;
int64_t total_sse = 0;
@@ -2047,15 +1839,15 @@
int x, y;
if (dw > 0) {
- encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride,
- dw, height, &sse, &sum);
+ encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, dw,
+ height, &sse, &sum);
total_sse += sse;
}
if (dh > 0) {
encoder_variance(&a[(height - dh) * a_stride], a_stride,
- &b[(height - dh) * b_stride], b_stride,
- width - dw, dh, &sse, &sum);
+ &b[(height - dh) * b_stride], b_stride, width - dw, dh,
+ &sse, &sum);
total_sse += sse;
}
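get_sse covers the bulk of the frame with optimized 16x16 SSE kernels and mops up the right (width % 16) and bottom (height % 16) strips with the generic encoder_variance above; the bottom strip spans only width - dw, so the corner is not counted twice. A worked example of the decomposition:

  /* width = 182, height = 122:
       dw = 182 % 16 = 6   -> right strip of 6 x 122 pixels
       dh = 122 % 16 = 10  -> bottom strip of (182 - 6) x 10 pixels
     The remaining 176 x 112 region is an exact 11 x 7 grid of
     16x16 blocks handled by the fast path. */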
@@ -2079,9 +1871,8 @@
#if CONFIG_VPX_HIGHBITDEPTH
static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
- const uint8_t *b8, int b_stride,
- int width, int height,
- unsigned int input_shift) {
+ const uint8_t *b8, int b_stride, int width,
+ int height, unsigned int input_shift) {
const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
int64_t total_sse = 0;
@@ -2098,9 +1889,8 @@
return total_sse;
}
-static int64_t highbd_get_sse(const uint8_t *a, int a_stride,
- const uint8_t *b, int b_stride,
- int width, int height) {
+static int64_t highbd_get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int width, int height) {
int64_t total_sse = 0;
int x, y;
const int dw = width % 16;
@@ -2108,9 +1898,8 @@
unsigned int sse = 0;
int sum = 0;
if (dw > 0) {
- encoder_highbd_8_variance(&a[width - dw], a_stride,
- &b[width - dw], b_stride,
- dw, height, &sse, &sum);
+ encoder_highbd_8_variance(&a[width - dw], a_stride, &b[width - dw],
+ b_stride, dw, height, &sse, &sum);
total_sse += sse;
}
if (dh > 0) {
@@ -2143,18 +1932,16 @@
#if CONFIG_VPX_HIGHBITDEPTH
static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b,
- PSNR_STATS *psnr,
+ const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
unsigned int bit_depth,
unsigned int in_bit_depth) {
- const int widths[3] =
- {a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
- const int heights[3] =
- {a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
- const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer };
- const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
- const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer };
- const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
+ const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
+ const int heights[3] = { a->y_crop_height, a->uv_crop_height,
+ a->uv_crop_height };
+ const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
+ const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
+ const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
+ const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
int i;
uint64_t total_sse = 0;
uint32_t total_samples = 0;
@@ -2168,17 +1955,14 @@
uint64_t sse;
if (a->flags & YV12_FLAG_HIGHBITDEPTH) {
if (input_shift) {
- sse = highbd_get_sse_shift(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i], w, h,
- input_shift);
+ sse = highbd_get_sse_shift(a_planes[i], a_strides[i], b_planes[i],
+ b_strides[i], w, h, input_shift);
} else {
- sse = highbd_get_sse(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i], w, h);
+ sse = highbd_get_sse(a_planes[i], a_strides[i], b_planes[i],
+ b_strides[i], w, h);
}
} else {
- sse = get_sse(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i],
- w, h);
+ sse = get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
}
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
@@ -2190,23 +1974,22 @@
psnr->sse[0] = total_sse;
psnr->samples[0] = total_samples;
- psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
- (double)total_sse);
+ psnr->psnr[0] =
+ vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
-#else // !CONFIG_VPX_HIGHBITDEPTH
+#else // !CONFIG_VPX_HIGHBITDEPTH
static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
PSNR_STATS *psnr) {
static const double peak = 255.0;
- const int widths[3] = {
- a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
- const int heights[3] = {
- a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
- const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
- const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
- const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
- const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
+ const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width };
+ const int heights[3] = { a->y_crop_height, a->uv_crop_height,
+ a->uv_crop_height };
+ const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer };
+ const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride };
+ const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer };
+ const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride };
int i;
uint64_t total_sse = 0;
uint32_t total_samples = 0;
@@ -2215,9 +1998,8 @@
const int w = widths[i];
const int h = heights[i];
const uint32_t samples = w * h;
- const uint64_t sse = get_sse(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i],
- w, h);
+ const uint64_t sse =
+ get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h);
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
@@ -2228,8 +2010,8 @@
psnr->sse[0] = total_sse;
psnr->samples[0] = total_samples;
- psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
- (double)total_sse);
+ psnr->psnr[0] =
+ vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse);
}
#endif // CONFIG_VPX_HIGHBITDEPTH
@@ -2254,8 +2036,7 @@
}
int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
- if (ref_frame_flags > 7)
- return -1;
+ if (ref_frame_flags > 7) return -1;
cpi->ref_frame_flags = ref_frame_flags;
return 0;
@@ -2268,8 +2049,8 @@
cpi->ext_refresh_frame_flags_pending = 1;
}
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(VP10_COMP *cpi,
- VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
+ VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
MV_REFERENCE_FRAME ref_frame = NONE;
if (ref_frame_flag == VPX_LAST_FLAG)
ref_frame = LAST_FRAME;
@@ -2282,7 +2063,7 @@
}
int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+ YV12_BUFFER_CONFIG *sd) {
YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
if (cfg) {
vpx_yv12_copy_frame(cfg, sd);
@@ -2293,7 +2074,7 @@
}
int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+ YV12_BUFFER_CONFIG *sd) {
YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
if (cfg) {
vpx_yv12_copy_frame(sd, cfg);
@@ -2303,7 +2084,7 @@
}
}
-int vp10_update_entropy(VP10_COMP * cpi, int update) {
+int vp10_update_entropy(VP10_COMP *cpi, int update) {
cpi->ext_refresh_frame_context = update;
cpi->ext_refresh_frame_context_pending = 1;
return 0;
@@ -2352,7 +2133,7 @@
uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
do {
- fwrite(src16, s->y_width, 2, yuv_rec_file);
+ fwrite(src16, s->y_width, 2, yuv_rec_file);
src16 += s->y_stride;
} while (--h);
@@ -2360,7 +2141,7 @@
h = s->uv_height;
do {
- fwrite(src16, s->uv_width, 2, yuv_rec_file);
+ fwrite(src16, s->uv_width, 2, yuv_rec_file);
src16 += s->uv_stride;
} while (--h);
@@ -2378,7 +2159,7 @@
#endif // CONFIG_VPX_HIGHBITDEPTH
do {
- fwrite(src, s->y_width, 1, yuv_rec_file);
+ fwrite(src, s->y_width, 1, yuv_rec_file);
src += s->y_stride;
} while (--h);
@@ -2386,7 +2167,7 @@
h = s->uv_height;
do {
- fwrite(src, s->uv_width, 1, yuv_rec_file);
+ fwrite(src, s->uv_width, 1, yuv_rec_file);
src += s->uv_stride;
} while (--h);
@@ -2412,32 +2193,33 @@
#endif // CONFIG_VPX_HIGHBITDEPTH
// TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t
int i;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- const int src_widths[3] = {src->y_crop_width, src->uv_crop_width,
- src->uv_crop_width };
- const int src_heights[3] = {src->y_crop_height, src->uv_crop_height,
- src->uv_crop_height};
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
- const int dst_widths[3] = {dst->y_crop_width, dst->uv_crop_width,
- dst->uv_crop_width};
- const int dst_heights[3] = {dst->y_crop_height, dst->uv_crop_height,
- dst->uv_crop_height};
+ const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+ src->v_buffer };
+ const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+ const int src_widths[3] = { src->y_crop_width, src->uv_crop_width,
+ src->uv_crop_width };
+ const int src_heights[3] = { src->y_crop_height, src->uv_crop_height,
+ src->uv_crop_height };
+ uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+ const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
+ const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width,
+ dst->uv_crop_width };
+ const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height,
+ dst->uv_crop_height };
for (i = 0; i < MAX_MB_PLANE; ++i) {
#if CONFIG_VPX_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
- src_strides[i], dsts[i], dst_heights[i],
- dst_widths[i], dst_strides[i], bd);
+ src_strides[i], dsts[i], dst_heights[i],
+ dst_widths[i], dst_strides[i], bd);
} else {
vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
- dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+ dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
}
#else
vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
- dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
+ dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
#endif // CONFIG_VPX_HIGHBITDEPTH
}
vpx_extend_frame_borders(dst);
@@ -2454,10 +2236,11 @@
const int src_h = src->y_crop_height;
const int dst_w = dst->y_crop_width;
const int dst_h = dst->y_crop_height;
- const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
- const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
- uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer};
- const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride};
+ const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
+ src->v_buffer };
+ const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
+ uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
+ const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
const InterpKernel *const kernel = vp10_filter_kernels[EIGHTTAP];
int x, y, i;
@@ -2469,8 +2252,9 @@
const int y_q4 = y * (16 / factor) * src_h / dst_h;
const int src_stride = src_strides[i];
const int dst_stride = dst_strides[i];
- const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h *
- src_stride + (x / factor) * src_w / dst_w;
+ const uint8_t *src_ptr = srcs[i] +
+ (y / factor) * src_h / dst_h * src_stride +
+ (x / factor) * src_w / dst_w;
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
#if CONFIG_VPX_HIGHBITDEPTH
@@ -2482,14 +2266,14 @@
} else {
vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
- kernel[y_q4 & 0xf], 16 * src_h / dst_h,
- 16 / factor, 16 / factor);
+ kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+ 16 / factor);
}
#else
vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
kernel[x_q4 & 0xf], 16 * src_w / dst_w,
- kernel[y_q4 & 0xf], 16 * src_h / dst_h,
- 16 / factor, 16 / factor);
+ kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
+ 16 / factor);
#endif // CONFIG_VPX_HIGHBITDEPTH
}
}
@@ -2506,8 +2290,9 @@
if (rc->frame_size_selector == UNSCALED &&
q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) {
- const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1]
- * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
+ const int max_size_thresh =
+ (int)(rate_thresh_mult[SCALE_STEP1] *
+ VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth));
scale = rc->projected_frame_size > max_size_thresh ? 1 : 0;
}
return scale;
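scale_down only triggers for frames still at native size whose q has reached the ceiling for their rate level; the byte threshold scales the larger of the per-frame target and the average bandwidth. A numeric trace (the multiplier is assumed; rate_thresh_mult[SCALE_STEP1] is defined elsewhere in this file):

  /* Assume rate_thresh_mult[SCALE_STEP1] == 1.75,
       this_frame_target   == 60000 bits,
       avg_frame_bandwidth == 48000 bits:
     max_size_thresh = (int)(1.75 * 60000) = 105000
     scale = (projected_frame_size > 105000) ? 1 : 0; */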
@@ -2515,8 +2300,7 @@
// Function to test for conditions that indicate we should loop
// back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi,
- int high_limit, int low_limit,
+static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
int q, int maxq, int minq) {
const RATE_CONTROL *const rc = &cpi->rc;
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
@@ -2525,14 +2309,12 @@
if ((rc->projected_frame_size >= rc->max_frame_bandwidth) ||
(cpi->sf.recode_loop == ALLOW_RECODE) ||
- (frame_is_kfgfarf &&
- (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
- if (frame_is_kfgfarf &&
- (oxcf->resize_mode == RESIZE_DYNAMIC) &&
+ (frame_is_kfgfarf && (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) {
+ if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) &&
scale_down(cpi, q)) {
- // Code this group at a lower resolution.
- cpi->resize_pending = 1;
- return 1;
+ // Code this group at a lower resolution.
+ cpi->resize_pending = 1;
+ return 1;
}
// TODO(agrange) high_limit could be greater than the scale-down threshold.
@@ -2552,16 +2334,16 @@
}
void vp10_update_reference_frames(VP10_COMP *cpi) {
- VP10_COMMON * const cm = &cpi->common;
+ VP10_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
// At this point the new frame has been encoded.
// If any buffer copy / swapping is signaled it should be done here.
if (cm->frame_type == KEY_FRAME) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+ cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+ cm->new_fb_idx);
} else if (vp10_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term in function
@@ -2573,8 +2355,8 @@
// slot and, if we're updating the GF, the current frame becomes the new GF.
int tmp;
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
+ cm->new_fb_idx);
tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
@@ -2587,16 +2369,15 @@
arf_idx = gf_group->arf_update_idx[gf_group->index];
}
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
cpi->interp_filter_selected[0],
sizeof(cpi->interp_filter_selected[0]));
}
if (cpi->refresh_golden_frame) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx],
+ cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
cpi->interp_filter_selected[0],
@@ -2609,8 +2390,8 @@
}
if (cpi->refresh_last_frame) {
- ref_cnt_fb(pool->frame_bufs,
- &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+ ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx],
+ cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
memcpy(cpi->interp_filter_selected[LAST_FRAME],
cpi->interp_filter_selected[0],
@@ -2639,9 +2420,8 @@
if (lf->filter_level > 0) {
if (cpi->num_workers > 1)
vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
- lf->filter_level, 0, 0,
- cpi->workers, cpi->num_workers,
- &cpi->lf_row_sync);
+ lf->filter_level, 0, 0, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
}
@@ -2649,16 +2429,13 @@
vpx_extend_frame_inner_borders(cm->frame_to_show);
}
-static INLINE void alloc_frame_mvs(const VP10_COMMON *cm,
- int buffer_idx) {
+static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, int buffer_idx) {
RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
- if (new_fb_ptr->mvs == NULL ||
- new_fb_ptr->mi_rows < cm->mi_rows ||
+ if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
new_fb_ptr->mi_cols < cm->mi_cols) {
vpx_free(new_fb_ptr->mvs);
- new_fb_ptr->mvs =
- (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*new_fb_ptr->mvs));
+ new_fb_ptr->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ sizeof(*new_fb_ptr->mvs));
new_fb_ptr->mi_rows = cm->mi_rows;
new_fb_ptr->mi_cols = cm->mi_cols;
}
@@ -2667,14 +2444,15 @@
void vp10_scale_references(VP10_COMP *cpi) {
VP10_COMMON *cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
- const VPX_REFFRAME ref_mask[3] = {VPX_LAST_FLAG, VPX_GOLD_FLAG, VPX_ALT_FLAG};
+ const VPX_REFFRAME ref_mask[3] = { VPX_LAST_FLAG, VPX_GOLD_FLAG,
+ VPX_ALT_FLAG };
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
// Need to convert from VPX_REFFRAME to index into ref_mask (subtract 1).
if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
BufferPool *const pool = cm->buffer_pool;
- const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi,
- ref_frame);
+ const YV12_BUFFER_CONFIG *const ref =
+ get_ref_frame_buffer(cpi, ref_frame);
if (ref == NULL) {
cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
@@ -2690,18 +2468,14 @@
new_fb = get_free_fb(cm);
force_scaling = 1;
}
- if (new_fb == INVALID_IDX)
- return;
+ if (new_fb == INVALID_IDX) return;
new_fb_ptr = &pool->frame_bufs[new_fb];
- if (force_scaling ||
- new_fb_ptr->buf.y_crop_width != cm->width ||
+ if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
- vpx_realloc_frame_buffer(&new_fb_ptr->buf,
- cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
- cm->use_highbitdepth,
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
- NULL, NULL, NULL);
+ vpx_realloc_frame_buffer(
+ &new_fb_ptr->buf, cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y, cm->use_highbitdepth, VPX_ENC_BORDER_IN_PIXELS,
+ cm->byte_alignment, NULL, NULL, NULL);
scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth);
cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
alloc_frame_mvs(cm, new_fb);
@@ -2715,14 +2489,11 @@
new_fb = get_free_fb(cm);
force_scaling = 1;
}
- if (new_fb == INVALID_IDX)
- return;
+ if (new_fb == INVALID_IDX) return;
new_fb_ptr = &pool->frame_bufs[new_fb];
- if (force_scaling ||
- new_fb_ptr->buf.y_crop_width != cm->width ||
+ if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width ||
new_fb_ptr->buf.y_crop_height != cm->height) {
- vpx_realloc_frame_buffer(&new_fb_ptr->buf,
- cm->width, cm->height,
+ vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL);
@@ -2740,8 +2511,7 @@
++buf->ref_count;
}
} else {
- if (cpi->oxcf.pass != 0)
- cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
+ if (cpi->oxcf.pass != 0) cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX;
}
}
}
@@ -2758,22 +2528,21 @@
refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0;
for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
const int idx = cpi->scaled_ref_idx[i - 1];
- RefCntBuffer *const buf = idx != INVALID_IDX ?
- &cm->buffer_pool->frame_bufs[idx] : NULL;
+ RefCntBuffer *const buf =
+ idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i);
if (buf != NULL &&
- (refresh[i - 1] ||
- (buf->buf.y_crop_width == ref->y_crop_width &&
- buf->buf.y_crop_height == ref->y_crop_height))) {
+ (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width &&
+ buf->buf.y_crop_height == ref->y_crop_height))) {
--buf->ref_count;
- cpi->scaled_ref_idx[i -1] = INVALID_IDX;
+ cpi->scaled_ref_idx[i - 1] = INVALID_IDX;
}
}
} else {
for (i = 0; i < MAX_REF_FRAMES; ++i) {
const int idx = cpi->scaled_ref_idx[i];
- RefCntBuffer *const buf = idx != INVALID_IDX ?
- &cm->buffer_pool->frame_bufs[idx] : NULL;
+ RefCntBuffer *const buf =
+ idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL;
if (buf != NULL) {
--buf->ref_count;
cpi->scaled_ref_idx[i] = INVALID_IDX;
@@ -2907,8 +2676,8 @@
cpi->common.interp_filter = cpi->sf.default_interp_filter;
}
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q,
- int *bottom_index, int *top_index) {
+static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+ int *top_index) {
VP10_COMMON *const cm = &cpi->common;
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
@@ -2946,40 +2715,37 @@
VP10EncoderConfig *const oxcf = &cpi->oxcf;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- if (oxcf->pass == 2 &&
- oxcf->rc_mode == VPX_VBR &&
+ if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
- (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
- vp10_calculate_coded_size(
- cpi, &oxcf->scaled_frame_width, &oxcf->scaled_frame_height);
+ (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
+ vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+ &oxcf->scaled_frame_height);
// There has been a change in frame size.
vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
- oxcf->scaled_frame_height);
+ oxcf->scaled_frame_height);
}
- if (oxcf->pass == 0 &&
- oxcf->rc_mode == VPX_CBR &&
+ if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
oxcf->resize_mode == RESIZE_DYNAMIC) {
- if (cpi->resize_pending == 1) {
- oxcf->scaled_frame_width =
- (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den;
- oxcf->scaled_frame_height =
- (cm->height * cpi->resize_scale_num) /cpi->resize_scale_den;
- } else if (cpi->resize_pending == -1) {
- // Go back up to original size.
- oxcf->scaled_frame_width = oxcf->width;
- oxcf->scaled_frame_height = oxcf->height;
- }
- if (cpi->resize_pending != 0) {
- // There has been a change in frame size.
- vp10_set_size_literal(cpi,
- oxcf->scaled_frame_width,
- oxcf->scaled_frame_height);
+ if (cpi->resize_pending == 1) {
+ oxcf->scaled_frame_width =
+ (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den;
+ oxcf->scaled_frame_height =
+ (cm->height * cpi->resize_scale_num) / cpi->resize_scale_den;
+ } else if (cpi->resize_pending == -1) {
+ // Go back up to original size.
+ oxcf->scaled_frame_width = oxcf->width;
+ oxcf->scaled_frame_height = oxcf->height;
+ }
+ if (cpi->resize_pending != 0) {
+ // There has been a change in frame size.
+ vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+ oxcf->scaled_frame_height);
- // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
- set_mv_search_params(cpi);
- }
+ // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
+ set_mv_search_params(cpi);
+ }
}
if (oxcf->pass == 2) {
@@ -2989,14 +2755,13 @@
alloc_frame_mvs(cm, cm->new_fb_idx);
// Reset the frame pointers to the current frame size.
- vpx_realloc_frame_buffer(get_frame_new_buffer(cm),
- cm->width, cm->height,
+ vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
- NULL, NULL, NULL);
+ VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL,
+ NULL, NULL);
alloc_util_frame_buffers(cpi);
init_motion_estimation(cpi);
@@ -3011,18 +2776,15 @@
YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
ref_buf->buf = buf;
#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&ref_buf->sf,
- buf->y_crop_width, buf->y_crop_height,
- cm->width, cm->height,
- (buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
- 1 : 0);
+ vp10_setup_scale_factors_for_frame(
+ &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
+ cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
#else
- vp10_setup_scale_factors_for_frame(&ref_buf->sf,
- buf->y_crop_width, buf->y_crop_height,
- cm->width, cm->height);
+ vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+ buf->y_crop_height, cm->width,
+ cm->height);
#endif // CONFIG_VPX_HIGHBITDEPTH
- if (vp10_is_scaled(&ref_buf->sf))
- vpx_extend_frame_borders(buf);
+ if (vp10_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf);
} else {
ref_buf->buf = NULL;
}
@@ -3041,24 +2803,21 @@
// For 1 pass CBR under dynamic resize mode: use faster scaling for source.
// Only for 2x2 scaling for now.
- if (cpi->oxcf.pass == 0 &&
- cpi->oxcf.rc_mode == VPX_CBR &&
+ if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR &&
cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
cpi->un_scaled_source->y_width == (cm->width << 1) &&
cpi->un_scaled_source->y_height == (cm->height << 1)) {
- cpi->Source = vp10_scale_if_required_fast(cm,
- cpi->un_scaled_source,
- &cpi->scaled_source);
+ cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
+ &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
- cpi->Last_Source = vp10_scale_if_required_fast(cm,
- cpi->unscaled_last_source,
- &cpi->scaled_last_source);
+ cpi->Last_Source = vp10_scale_if_required_fast(
+ cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
} else {
- cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source,
- &cpi->scaled_source);
+ cpi->Source =
+ vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
- &cpi->scaled_last_source);
+ &cpi->scaled_last_source);
}
if (frame_is_intra_only(cm) == 0) {
@@ -3090,8 +2849,7 @@
// Update some stats from cyclic refresh, and check if we should not update
// golden reference, for 1 pass CBR.
- if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
- cm->frame_type != KEY_FRAME &&
+ if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
(cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
vp10_cyclic_refresh_check_golden_update(cpi);
@@ -3101,8 +2859,7 @@
vpx_clear_system_state();
}
-static void encode_with_recode_loop(VP10_COMP *cpi,
- size_t *size,
+static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
uint8_t *dest) {
VP10_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
@@ -3145,16 +2902,16 @@
// Decide frame size bounds first time through.
if (loop_count == 0) {
vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
- &frame_under_shoot_limit,
- &frame_over_shoot_limit);
+ &frame_under_shoot_limit,
+ &frame_over_shoot_limit);
}
- cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source,
- &cpi->scaled_source);
+ cpi->Source =
+ vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
if (cpi->unscaled_last_source != NULL)
cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
- &cpi->scaled_last_source);
+ &cpi->scaled_last_source);
if (frame_is_intra_only(cm) == 0) {
if (loop_count > 0) {
@@ -3165,8 +2922,7 @@
vp10_set_quantizer(cm, q);
- if (loop_count == 0)
- setup_frame(cpi);
+ if (loop_count == 0) setup_frame(cpi);
// Variance adaptive and in frame q adjustment experiments are mutually
// exclusive.
@@ -3195,16 +2951,14 @@
rc->projected_frame_size = (int)(*size) << 3;
restore_coding_context(cpi);
- if (frame_over_shoot_limit == 0)
- frame_over_shoot_limit = 1;
+ if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1;
}
if (cpi->oxcf.rc_mode == VPX_Q) {
loop = 0;
} else {
- if ((cm->frame_type == KEY_FRAME) &&
- rc->this_key_frame_forced &&
- (rc->projected_frame_size < rc->max_frame_bandwidth)) {
+ if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced &&
+ (rc->projected_frame_size < rc->max_frame_bandwidth)) {
int last_q = q;
int64_t kf_err;
@@ -3251,9 +3005,9 @@
q = clamp(q, q_low, q_high);
loop = q != last_q;
- } else if (recode_loop_test(
- cpi, frame_over_shoot_limit, frame_under_shoot_limit,
- q, VPXMAX(q_high, top_index), bottom_index)) {
+ } else if (recode_loop_test(cpi, frame_over_shoot_limit,
+ frame_under_shoot_limit, q,
+ VPXMAX(q_high, top_index), bottom_index)) {
// Is the projected frame size out of range and are we allowed
// to attempt to recode.
int last_q = q;
@@ -3294,13 +3048,13 @@
// Update rate_correction_factor unless
vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, VPXMAX(q_high, top_index));
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ VPXMAX(q_high, top_index));
while (q < q_low && retries < 10) {
vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, VPXMAX(q_high, top_index));
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ VPXMAX(q_high, top_index));
retries++;
}
}
@@ -3315,21 +3069,20 @@
q = (q_high + q_low) / 2;
} else {
vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, top_index);
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
// Special case reset for qlow for constrained quality.
// This should only trigger where there is very substantial
// undershoot on a frame and the auto cq level is above
      // the user passed in value.
- if (cpi->oxcf.rc_mode == VPX_CQ &&
- q < q_low) {
+ if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) {
q_low = q;
}
while (q > q_high && retries < 10) {
vp10_rc_update_rate_correction_factors(cpi);
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- bottom_index, top_index);
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+ top_index);
retries++;
}
}
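Together the overshoot and undershoot branches maintain a shrinking bracket: raise q_low when the projected size is too big, lower q_high when it is too small, and fall back to the midpoint whenever the regulated estimate escapes the bracket. A loose paraphrase of that shape (overshoot/undershoot stand in for the size comparisons above):

  /* Sketch only; condenses the branches above into one step. */
  if (overshoot) q_low = q + 1;        /* need a coarser quantizer */
  else if (undershoot) q_high = q - 1; /* can afford a finer one */
  q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index, top_index);
  if (q < q_low || q > q_high) q = (q_high + q_low) / 2; /* bisect */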
@@ -3369,17 +3122,13 @@
const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx];
int flags = VPX_ALT_FLAG | VPX_GOLD_FLAG | VPX_LAST_FLAG;
- if (gold_is_last)
- flags &= ~VPX_GOLD_FLAG;
+ if (gold_is_last) flags &= ~VPX_GOLD_FLAG;
- if (cpi->rc.frames_till_gf_update_due == INT_MAX)
- flags &= ~VPX_GOLD_FLAG;
+ if (cpi->rc.frames_till_gf_update_due == INT_MAX) flags &= ~VPX_GOLD_FLAG;
- if (alt_is_last)
- flags &= ~VPX_ALT_FLAG;
+ if (alt_is_last) flags &= ~VPX_ALT_FLAG;
- if (gold_is_alt)
- flags &= ~VPX_ALT_FLAG;
+ if (gold_is_alt) flags &= ~VPX_ALT_FLAG;
return flags;
}
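The pruning above removes a reference from the search whenever two slots alias the same buffer. A trace: if map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx] and map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx], then

  /* gold_is_last -> flags &= ~VPX_GOLD_FLAG;
     gold_is_alt  -> flags &= ~VPX_ALT_FLAG;
     result: flags == VPX_LAST_FLAG, so only LAST is searched. */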
@@ -3402,13 +3151,12 @@
}
YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled) {
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
// For 2x2 scaling down.
- vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1,
- 2, 1, 0);
+ vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, 2, 1, 0);
vpx_extend_frame_borders(scaled);
return scaled;
} else {
@@ -3417,8 +3165,8 @@
}
YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled) {
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled) {
if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
cm->mi_rows * MI_SIZE != unscaled->y_height) {
#if CONFIG_VPX_HIGHBITDEPTH
@@ -3443,18 +3191,17 @@
(gf_group->rf_level[gf_group->index] == GF_ARF_LOW));
} else {
arf_sign_bias =
- (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
+ (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame);
}
cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
}
static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
INTERP_FILTER ifilter;
- int ref_total[MAX_REF_FRAMES] = {0};
+ int ref_total[MAX_REF_FRAMES] = { 0 };
MV_REFERENCE_FRAME ref;
int mask = 0;
- if (cpi->common.last_frame_type == KEY_FRAME ||
- cpi->refresh_alt_ref_frame)
+ if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame)
return mask;
for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref)
for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter)
@@ -3462,20 +3209,19 @@
for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) {
if ((ref_total[LAST_FRAME] &&
- cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
+ cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) &&
(ref_total[GOLDEN_FRAME] == 0 ||
- cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50
- < ref_total[GOLDEN_FRAME]) &&
+ cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 <
+ ref_total[GOLDEN_FRAME]) &&
(ref_total[ALTREF_FRAME] == 0 ||
- cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50
- < ref_total[ALTREF_FRAME]))
+ cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 <
+ ref_total[ALTREF_FRAME]))
mask |= 1 << ifilter;
}
return mask;
}
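A set bit in the returned mask marks a switchable filter the mode search may skip: the filter was never chosen on the last frame, and golden/alt-ref usage stays under 2% (that is what the selected * 50 < total comparison tests). A numeric trace under assumed counts:

  /* For ifilter == EIGHTTAP_SMOOTH, suppose:
       interp_filter_selected[LAST_FRAME][ifilter]   == 0, ref_total[LAST_FRAME]   == 400
       interp_filter_selected[GOLDEN_FRAME][ifilter] == 1, ref_total[GOLDEN_FRAME] == 80
         -> 1 * 50 = 50 < 80 (about 1.25% usage)
       ref_total[ALTREF_FRAME] == 0 (no alt-ref blocks at all)
     All three clauses hold, so mask |= 1 << ifilter. */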
-static void encode_frame_to_data_rate(VP10_COMP *cpi,
- size_t *size,
+static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
uint8_t *dest,
unsigned int *frame_flags) {
VP10_COMMON *const cm = &cpi->common;
@@ -3492,10 +3238,8 @@
// Set default state for segment based loop filter update flags.
cm->lf.mode_ref_delta_update = 0;
- if (cpi->oxcf.pass == 2 &&
- cpi->sf.adaptive_interp_filter_search)
- cpi->sf.interp_filter_search_mask =
- setup_interp_filter_search_mask(cpi);
+ if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search)
+ cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi);
// Set various flags etc to special state if it is a key frame.
if (frame_is_intra_only(cm)) {
@@ -3525,8 +3269,7 @@
// For 1 pass CBR, check if we are dropping this frame.
// Never drop on key frame.
- if (oxcf->pass == 0 &&
- oxcf->rc_mode == VPX_CBR &&
+ if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
cm->frame_type != KEY_FRAME) {
if (vp10_rc_drop_frame(cpi)) {
vp10_rc_postencode_update_drop_frame(cpi);
@@ -3560,8 +3303,8 @@
if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
#if CONFIG_VPX_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- cpi->ambient_err = vp10_highbd_get_y_sse(cpi->Source,
- get_frame_new_buffer(cm));
+ cpi->ambient_err =
+ vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
@@ -3571,13 +3314,12 @@
}
// If the encoder forced a KEY_FRAME decision
- if (cm->frame_type == KEY_FRAME)
- cpi->refresh_last_frame = 1;
+ if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1;
cm->frame_to_show = get_frame_new_buffer(cm);
cm->frame_to_show->color_space = cm->color_space;
cm->frame_to_show->color_range = cm->color_range;
- cm->frame_to_show->render_width = cm->render_width;
+ cm->frame_to_show->render_width = cm->render_width;
cm->frame_to_show->render_height = cm->render_height;
// Pick the loop filter level for the frame.
@@ -3586,8 +3328,7 @@
// build the bitstream
vp10_pack_bitstream(cpi, dest, size);
- if (cm->seg.update_map)
- update_reference_segmentation_map(cpi);
+ if (cm->seg.update_map) update_reference_segmentation_map(cpi);
if (frame_is_intra_only(cm) == 0) {
release_scaled_references(cpi);
@@ -3603,8 +3344,7 @@
#if CONFIG_MISC_FIXES
vp10_adapt_intra_frame_probs(cm);
#else
- if (!frame_is_intra_only(cm))
- vp10_adapt_intra_frame_probs(cm);
+ if (!frame_is_intra_only(cm)) vp10_adapt_intra_frame_probs(cm);
#endif
}
@@ -3653,8 +3393,7 @@
cm->last_height = cm->height;
// reset to normal state now that we are done.
- if (!cm->show_existing_frame)
- cm->last_show_frame = cm->show_frame;
+ if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
if (cm->show_frame) {
vp10_swap_mi_and_prev_mi(cm);
@@ -3675,8 +3414,8 @@
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
}
-static void Pass2Encode(VP10_COMP *cpi, size_t *size,
- uint8_t *dest, unsigned int *frame_flags) {
+static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+ unsigned int *frame_flags) {
cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
@@ -3725,8 +3464,8 @@
}
int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time) {
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time) {
VP10_COMMON *cm = &cpi->common;
struct vpx_usec_timer timer;
int res = 0;
@@ -3743,9 +3482,9 @@
if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
#if CONFIG_VPX_HIGHBITDEPTH
- use_highbitdepth,
+ use_highbitdepth,
#endif // CONFIG_VPX_HIGHBITDEPTH
- frame_flags))
+ frame_flags))
res = -1;
vpx_usec_timer_mark(&timer);
cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
@@ -3766,17 +3505,13 @@
return res;
}
-
static int frame_is_reference(const VP10_COMP *cpi) {
const VP10_COMMON *cm = &cpi->common;
- return cm->frame_type == KEY_FRAME ||
- cpi->refresh_last_frame ||
- cpi->refresh_golden_frame ||
- cpi->refresh_alt_ref_frame ||
+ return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
+ cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF ||
- cm->lf.mode_ref_delta_update ||
- cm->seg.update_map ||
+ cm->lf.mode_ref_delta_update || cm->seg.update_map ||
cm->seg.update_data;
}
@@ -3789,8 +3524,8 @@
this_duration = source->ts_end - source->ts_start;
step = 1;
} else {
- int64_t last_duration = cpi->last_end_time_stamp_seen
- - cpi->last_time_stamp_seen;
+ int64_t last_duration =
+ cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen;
this_duration = source->ts_end - cpi->last_end_time_stamp_seen;
@@ -3844,10 +3579,10 @@
if (cpi->oxcf.pass == 2) {
const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
rc->is_src_frame_alt_ref =
- (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
+ (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE);
} else {
- rc->is_src_frame_alt_ref = cpi->alt_ref_source &&
- (source == cpi->alt_ref_source);
+ rc->is_src_frame_alt_ref =
+ cpi->alt_ref_source && (source == cpi->alt_ref_source);
}
if (rc->is_src_frame_alt_ref) {
@@ -3862,8 +3597,8 @@
#if CONFIG_INTERNAL_STATS
extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
- const unsigned char *img2, int img2_pitch,
- int width, int height);
+ const unsigned char *img2, int img2_pitch,
+ int width, int height);
static void adjust_image_stat(double y, double u, double v, double all,
ImageStat *s) {
@@ -3876,13 +3611,13 @@
#endif // CONFIG_INTERNAL_STATS
int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest,
- int64_t *time_stamp, int64_t *time_end, int flush) {
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush) {
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
VP10_COMMON *const cm = &cpi->common;
BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
- struct vpx_usec_timer cmptimer;
+ struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
struct lookahead_entry *last_source = NULL;
struct lookahead_entry *source = NULL;
@@ -3902,10 +3637,11 @@
// Normal defaults
cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
- cm->refresh_frame_context =
- oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF :
- oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD
- : REFRESH_FRAME_CONTEXT_BACKWARD;
+ cm->refresh_frame_context = oxcf->error_resilient_mode
+ ? REFRESH_FRAME_CONTEXT_OFF
+ : oxcf->frame_parallel_decoding_mode
+ ? REFRESH_FRAME_CONTEXT_FORWARD
+ : REFRESH_FRAME_CONTEXT_BACKWARD;
cpi->refresh_last_frame = 1;
cpi->refresh_golden_frame = 0;
@@ -3959,8 +3695,8 @@
}
if (source) {
- cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer
- : &source->img;
+ cpi->un_scaled_source = cpi->Source =
+ force_src_buffer ? force_src_buffer : &source->img;
cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL;
@@ -3971,7 +3707,7 @@
} else {
*size = 0;
if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
- vp10_end_first_pass(cpi); /* get last stats packet */
+ vp10_end_first_pass(cpi); /* get last stats packet */
cpi->twopass.first_pass_done = 1;
}
return -1;
@@ -3997,8 +3733,7 @@
}
cm->new_fb_idx = get_free_fb(cm);
- if (cm->new_fb_idx == INVALID_IDX)
- return -1;
+ if (cm->new_fb_idx == INVALID_IDX) return -1;
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
@@ -4023,8 +3758,7 @@
}
if (cpi->oxcf.pass != 0 || frame_is_intra_only(cm) == 1) {
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- cpi->scaled_ref_idx[i] = INVALID_IDX;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX;
}
if (oxcf->pass == 1) {
@@ -4087,8 +3821,8 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight,
- (int)cm->bit_depth);
+ frame_ssim2 =
+ vpx_highbd_calc_ssim(orig, recon, &weight, (int)cm->bit_depth);
} else {
frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
}
@@ -4096,7 +3830,7 @@
frame_ssim2 = vpx_calc_ssim(orig, recon, &weight);
#endif // CONFIG_VPX_HIGHBITDEPTH
- cpi->worst_ssim= VPXMIN(cpi->worst_ssim, frame_ssim2);
+ cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
cpi->summed_quality += frame_ssim2 * weight;
cpi->summed_weights += weight;
@@ -4140,8 +3874,8 @@
&cpi->metrics, 1);
const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1);
- double consistency = vpx_sse_to_psnr(samples, peak,
- (double)cpi->total_inconsistency);
+ double consistency =
+ vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency);
if (consistency > 0.0)
cpi->worst_consistency =
VPXMIN(cpi->worst_consistency, consistency);
@@ -4156,8 +3890,8 @@
frame_all = vpx_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
&u, &v, (int)cm->bit_depth);
} else {
- frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u,
- &v);
+ frame_all =
+ vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
}
#else
frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
@@ -4169,8 +3903,8 @@
#endif
{
double y, u, v, frame_all;
- frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u,
- &v);
+ frame_all =
+ vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u, &v);
adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
/* TODO(JBB): add 10/12 bit support */
}
@@ -4212,13 +3946,12 @@
}
}
-int vp10_set_internal_size(VP10_COMP *cpi,
- VPX_SCALING horiz_mode, VPX_SCALING vert_mode) {
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+ VPX_SCALING vert_mode) {
VP10_COMMON *cm = &cpi->common;
int hr = 0, hs = 0, vr = 0, vs = 0;
- if (horiz_mode > ONETWO || vert_mode > ONETWO)
- return -1;
+ if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
Scale2Ratio(horiz_mode, &hr, &hs);
Scale2Ratio(vert_mode, &vr, &vs);
@@ -4235,7 +3968,7 @@
}
int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
- unsigned int height) {
+ unsigned int height) {
VP10_COMMON *cm = &cpi->common;
#if CONFIG_VPX_HIGHBITDEPTH
check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
@@ -4267,7 +4000,7 @@
}
int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b) {
+ const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
@@ -4277,7 +4010,7 @@
#if CONFIG_VPX_HIGHBITDEPTH
int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b) {
+ const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0);
@@ -4288,40 +4021,32 @@
}
#endif // CONFIG_VPX_HIGHBITDEPTH
-int vp10_get_quantizer(VP10_COMP *cpi) {
- return cpi->common.base_qindex;
-}
+int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) {
- if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF)) {
+ if (flags &
+ (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
int ref = 7;
- if (flags & VP8_EFLAG_NO_REF_LAST)
- ref ^= VPX_LAST_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VPX_LAST_FLAG;
- if (flags & VP8_EFLAG_NO_REF_GF)
- ref ^= VPX_GOLD_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VPX_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_REF_ARF)
- ref ^= VPX_ALT_FLAG;
+ if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
vp10_use_as_reference(cpi, ref);
}
- if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_FORCE_ARF)) {
+ if (flags &
+ (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) {
int upd = 7;
- if (flags & VP8_EFLAG_NO_UPD_LAST)
- upd ^= VPX_LAST_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VPX_LAST_FLAG;
- if (flags & VP8_EFLAG_NO_UPD_GF)
- upd ^= VPX_GOLD_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VPX_GOLD_FLAG;
- if (flags & VP8_EFLAG_NO_UPD_ARF)
- upd ^= VPX_ALT_FLAG;
+ if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
vp10_update_reference(cpi, upd);
}
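The XOR trick starts from the full mask (7 == VPX_LAST_FLAG | VPX_GOLD_FLAG | VPX_ALT_FLAG) and toggles off one bit per NO_REF flag; since every bit starts set, XOR acts as a clear here. A worked trace:

  /* flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF:
       ref = 7;              // 0b111  LAST | GOLD | ALT
       ref ^= VPX_GOLD_FLAG; // 0b101
       ref ^= VPX_ALT_FLAG;  // 0b001  only LAST remains
     vp10_use_as_reference(cpi, ref); */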
diff --git a/vp10/encoder/encoder.h b/vp10/encoder/encoder.h
index 87219ed..3547cba 100644
--- a/vp10/encoder/encoder.h
+++ b/vp10/encoder/encoder.h
@@ -64,7 +64,6 @@
FRAME_CONTEXT fc;
} CODING_CONTEXT;
-
typedef enum {
// encode_breakout is disabled.
ENCODE_BREAKOUT_DISABLED = 0,
@@ -75,10 +74,10 @@
} ENCODE_BREAKOUT_TYPE;
typedef enum {
- NORMAL = 0,
- FOURFIVE = 1,
- THREEFIVE = 2,
- ONETWO = 3
+ NORMAL = 0,
+ FOURFIVE = 1,
+ THREEFIVE = 2,
+ ONETWO = 3
} VPX_SCALING;
typedef enum {
@@ -98,7 +97,7 @@
} MODE;
typedef enum {
- FRAMEFLAGS_KEY = 1 << 0,
+ FRAMEFLAGS_KEY = 1 << 0,
FRAMEFLAGS_GOLDEN = 1 << 1,
FRAMEFLAGS_ALTREF = 1 << 2,
} FRAMETYPE_FLAGS;
@@ -120,14 +119,14 @@
typedef struct VP10EncoderConfig {
BITSTREAM_PROFILE profile;
vpx_bit_depth_t bit_depth; // Codec bit-depth.
- int width; // width of data passed to the compressor
- int height; // height of data passed to the compressor
+ int width; // width of data passed to the compressor
+ int height; // height of data passed to the compressor
unsigned int input_bit_depth; // Input bit depth.
- double init_framerate; // set to passed in framerate
- int64_t target_bandwidth; // bandwidth to be used in kilobits per second
+ double init_framerate; // set to passed in framerate
+ int64_t target_bandwidth; // bandwidth to be used in kilobits per second
int noise_sensitivity; // pre processing blur: recommendation 0
- int sharpness; // sharpening output: recommendation 0:
+ int sharpness; // sharpening output: recommendation 0:
int speed;
// maximum allowed bitrate for any intra frame in % of bitrate target.
unsigned int rc_max_intra_bitrate_pct;
@@ -179,7 +178,7 @@
int frame_periodic_boost;
// two pass datarate control
- int two_pass_vbrbias; // two pass datarate control tweaks
+ int two_pass_vbrbias; // two pass datarate control tweaks
int two_pass_vbrmin_section;
int two_pass_vbrmax_section;
// END DATARATE CONTROL OPTIONS
@@ -267,15 +266,10 @@
unsigned char *map;
} ActiveMap;
-typedef enum {
- Y,
- U,
- V,
- ALL
-} STAT_TYPE;
+typedef enum { Y, U, V, ALL } STAT_TYPE;
typedef struct IMAGE_STAT {
- double stat[ALL+1];
+ double stat[ALL + 1];
double worst;
} ImageStat;
@@ -287,8 +281,8 @@
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
VP10_COMMON common;
VP10EncoderConfig oxcf;
- struct lookahead_ctx *lookahead;
- struct lookahead_entry *alt_ref_source;
+ struct lookahead_ctx *lookahead;
+ struct lookahead_entry *alt_ref_source;
YV12_BUFFER_CONFIG *Source;
YV12_BUFFER_CONFIG *Last_Source; // NULL for first frame and alt_ref frames
@@ -346,11 +340,11 @@
int interp_filter_selected[MAX_REF_FRAMES][SWITCHABLE];
- struct vpx_codec_pkt_list *output_pkt_list;
+ struct vpx_codec_pkt_list *output_pkt_list;
MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
- int mbgraph_n_frames; // number of frames filled in the above
- int static_mb_pct; // % forced skip mbs by segmentation
+ int mbgraph_n_frames; // number of frames filled in the above
+ int static_mb_pct; // % forced skip mbs by segmentation
int ref_frame_flags;
SPEED_FEATURES sf;
@@ -370,7 +364,7 @@
unsigned char *segmentation_map;
  // segment threshold for encode breakout
- int segment_encode_breakout[MAX_SEGMENTS];
+ int segment_encode_breakout[MAX_SEGMENTS];
CYCLIC_REFRESH *cyclic_refresh;
ActiveMap active_map;
@@ -392,11 +386,10 @@
YV12_BUFFER_CONFIG alt_ref_buffer;
-
#if CONFIG_INTERNAL_STATS
unsigned int mode_chosen_counts[MAX_MODES];
- int count;
+ int count;
uint64_t total_sq_error;
uint64_t total_samples;
ImageStat psnr;
@@ -408,7 +401,7 @@
double total_blockiness;
double worst_blockiness;
- int bytes;
+ int bytes;
double summed_quality;
double summed_weights;
double summedp_quality;
@@ -491,20 +484,20 @@
void vp10_initialize_enc(void);
struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
- BufferPool *const pool);
+ BufferPool *const pool);
void vp10_remove_compressor(VP10_COMP *cpi);
void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
- // receive a frames worth of data. caller can assume that a copy of this
- // frame is made and not just a copy of the pointer..
+// receive a frame's worth of data. caller can assume that a copy of this
+// frame is made and not just a copy of the pointer.
int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
- YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
- int64_t end_time_stamp);
+ YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
+ int64_t end_time_stamp);
int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
- size_t *size, uint8_t *dest,
- int64_t *time_stamp, int64_t *time_end, int flush);
+ size_t *size, uint8_t *dest, int64_t *time_stamp,
+ int64_t *time_end, int flush);
int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
@@ -513,10 +506,10 @@
void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
+ YV12_BUFFER_CONFIG *sd);
int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
+ YV12_BUFFER_CONFIG *sd);
int vp10_update_entropy(VP10_COMP *cpi, int update);
@@ -524,17 +517,16 @@
int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
-int vp10_set_internal_size(VP10_COMP *cpi,
- VPX_SCALING horiz_mode, VPX_SCALING vert_mode);
+int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+ VPX_SCALING vert_mode);
int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
- unsigned int height);
+ unsigned int height);
int vp10_get_quantizer(struct VP10_COMP *cpi);
static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
- return frame_is_intra_only(&cpi->common) ||
- cpi->refresh_alt_ref_frame ||
+ return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
}
@@ -560,8 +552,8 @@
VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
VP10_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
- return
- buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf : NULL;
+ return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
+ : NULL;
}
static INLINE int get_token_alloc(int mb_rows, int mb_cols) {
@@ -582,10 +574,11 @@
return get_token_alloc(tile_mb_rows, tile_mb_cols);
}
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b);
#if CONFIG_VPX_HIGHBITDEPTH
int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b);
+ const YV12_BUFFER_CONFIG *b);
#endif // CONFIG_VPX_HIGHBITDEPTH
void vp10_alloc_compressor_data(VP10_COMP *cpi);
@@ -597,12 +590,12 @@
void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled);
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled);
YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
- YV12_BUFFER_CONFIG *unscaled,
- YV12_BUFFER_CONFIG *scaled);
+ YV12_BUFFER_CONFIG *unscaled,
+ YV12_BUFFER_CONFIG *scaled);
void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags);
@@ -614,10 +607,10 @@
static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
- xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME
- : 0];
- xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME
- : 0];
+ xd->block_refs[0] =
+ &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME : 0];
+ xd->block_refs[1] =
+ &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME : 0];
}
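set_ref_ptrs, just above, folds the MV_REFERENCE_FRAME enum down to a frame_refs index with a clamped subtraction: LAST/GOLDEN/ALTREF land on slots 0..2 and anything below LAST_FRAME (intra or none) falls back to slot 0. The same mapping in isolation (enum values assumed to follow the libvpx ordering):

enum { NONE_FRAME = -1, INTRA_FRAME, LAST_FRAME }; /* assumed ordering */

static int ref_slot(int ref) {
  /* LAST -> 0, GOLDEN -> 1, ALTREF -> 2; INTRA/NONE clamp to slot 0. */
  return ref >= LAST_FRAME ? ref - LAST_FRAME : 0;
}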
static INLINE int get_chessboard_index(const int frame_index) {
diff --git a/vp10/encoder/ethread.c b/vp10/encoder/ethread.c
index ad47ccf..21f9d71 100644
--- a/vp10/encoder/ethread.c
+++ b/vp10/encoder/ethread.c
@@ -31,7 +31,6 @@
td->rd_counts.coef_counts[i][j][k][l][m][n] +=
td_t->rd_counts.coef_counts[i][j][k][l][m][n];
-
// Counts of all motion searches and exhaustive mesh searches.
td->rd_counts.m_search_count += td_t->rd_counts.m_search_count;
td->rd_counts.ex_search_count += td_t->rd_counts.ex_search_count;
@@ -44,10 +43,10 @@
const int tile_rows = 1 << cm->log2_tile_rows;
int t;
- (void) unused;
+ (void)unused;
for (t = thread_data->start; t < tile_rows * tile_cols;
- t += cpi->num_workers) {
+ t += cpi->num_workers) {
int tile_row = t / tile_cols;
int tile_col = t % tile_cols;
@@ -74,8 +73,7 @@
vpx_malloc(allocated_workers * sizeof(*cpi->workers)));
CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
- vpx_calloc(allocated_workers,
- sizeof(*cpi->tile_thr_data)));
+ vpx_calloc(allocated_workers, sizeof(*cpi->tile_thr_data)));
for (i = 0; i < allocated_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
@@ -122,7 +120,7 @@
worker->hook = (VPxWorkerHook)enc_worker_hook;
worker->data1 = &cpi->tile_thr_data[i];
worker->data2 = NULL;
- thread_data = (EncWorkerData*)worker->data1;
+ thread_data = (EncWorkerData *)worker->data1;
// Before encoding a frame, copy the thread data from cpi.
if (thread_data->td != &cpi->td) {
@@ -138,7 +136,7 @@
// Encode a frame
for (i = 0; i < num_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Set the starting tile for each thread.
thread_data->start = i;
@@ -157,7 +155,7 @@
for (i = 0; i < num_workers; i++) {
VPxWorker *const worker = &cpi->workers[i];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;
// Accumulate counters.
if (i < cpi->num_workers - 1) {
diff --git a/vp10/encoder/extend.c b/vp10/encoder/extend.c
index b6d32c6..3b0c20d 100644
--- a/vp10/encoder/extend.c
+++ b/vp10/encoder/extend.c
@@ -16,8 +16,7 @@
#include "vp10/encoder/extend.h"
static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
- uint8_t *dst, int dst_pitch,
- int w, int h,
+ uint8_t *dst, int dst_pitch, int w, int h,
int extend_top, int extend_left,
int extend_bottom, int extend_right) {
int i, linesize;
@@ -43,7 +42,7 @@
src_ptr1 = dst - extend_left;
src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
- dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h)-extend_left;
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
@@ -59,9 +58,8 @@
#if CONFIG_VPX_HIGHBITDEPTH
static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
- uint8_t *dst8, int dst_pitch,
- int w, int h,
- int extend_top, int extend_left,
+ uint8_t *dst8, int dst_pitch, int w,
+ int h, int extend_top, int extend_left,
int extend_bottom, int extend_right) {
int i, linesize;
uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -88,7 +86,7 @@
src_ptr1 = dst - extend_left;
src_ptr2 = dst + dst_pitch * (h - 1) - extend_left;
dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left;
- dst_ptr2 = dst + dst_pitch * (h) - extend_left;
+ dst_ptr2 = dst + dst_pitch * (h)-extend_left;
linesize = extend_left + extend_right + w;
for (i = 0; i < extend_top; i++) {
@@ -104,7 +102,7 @@
#endif // CONFIG_VPX_HIGHBITDEPTH
void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst) {
+ YV12_BUFFER_CONFIG *dst) {
// Extend src frame in buffer
// Altref filtering assumes 16 pixel extension
const int et_y = 16;
@@ -127,51 +125,46 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
- highbd_copy_and_extend_plane(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride,
- src->y_crop_width, src->y_crop_height,
- et_y, el_y, eb_y, er_y);
+ highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, src->y_crop_width,
+ src->y_crop_height, et_y, el_y, eb_y, er_y);
- highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
- et_uv, el_uv, eb_uv, er_uv);
+ highbd_copy_and_extend_plane(
+ src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride,
+ src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
- highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
- et_uv, el_uv, eb_uv, er_uv);
+ highbd_copy_and_extend_plane(
+ src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride,
+ src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
return;
}
#endif // CONFIG_VPX_HIGHBITDEPTH
- copy_and_extend_plane(src->y_buffer, src->y_stride,
- dst->y_buffer, dst->y_stride,
- src->y_crop_width, src->y_crop_height,
+ copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
+ dst->y_stride, src->y_crop_width, src->y_crop_height,
et_y, el_y, eb_y, er_y);
- copy_and_extend_plane(src->u_buffer, src->uv_stride,
- dst->u_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
+ copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer,
+ dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
et_uv, el_uv, eb_uv, er_uv);
- copy_and_extend_plane(src->v_buffer, src->uv_stride,
- dst->v_buffer, dst->uv_stride,
- src->uv_crop_width, src->uv_crop_height,
+ copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer,
+ dst->uv_stride, src->uv_crop_width, src->uv_crop_height,
et_uv, el_uv, eb_uv, er_uv);
}
void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst,
- int srcy, int srcx,
- int srch, int srcw) {
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw) {
// If the side is not touching the boundary then don't extend.
const int et_y = srcy ? 0 : dst->border;
const int el_y = srcx ? 0 : dst->border;
- const int eb_y = srcy + srch != src->y_height ? 0 :
- dst->border + dst->y_height - src->y_height;
- const int er_y = srcx + srcw != src->y_width ? 0 :
- dst->border + dst->y_width - src->y_width;
+ const int eb_y = srcy + srch != src->y_height
+ ? 0
+ : dst->border + dst->y_height - src->y_height;
+ const int er_y = srcx + srcw != src->y_width
+ ? 0
+ : dst->border + dst->y_width - src->y_width;
const int src_y_offset = srcy * src->y_stride + srcx;
const int dst_y_offset = srcy * dst->y_stride + srcx;
@@ -185,17 +178,14 @@
const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1);
copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride,
- dst->y_buffer + dst_y_offset, dst->y_stride,
- srcw, srch,
+ dst->y_buffer + dst_y_offset, dst->y_stride, srcw, srch,
et_y, el_y, eb_y, er_y);
copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride,
- dst->u_buffer + dst_uv_offset, dst->uv_stride,
- srcw_uv, srch_uv,
- et_uv, el_uv, eb_uv, er_uv);
+ dst->u_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+ srch_uv, et_uv, el_uv, eb_uv, er_uv);
copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride,
- dst->v_buffer + dst_uv_offset, dst->uv_stride,
- srcw_uv, srch_uv,
- et_uv, el_uv, eb_uv, er_uv);
+ dst->v_buffer + dst_uv_offset, dst->uv_stride, srcw_uv,
+ srch_uv, et_uv, el_uv, eb_uv, er_uv);
}
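For context on what copy_and_extend_plane and its high-bit-depth twin actually compute: the border is filled by pure edge replication, first smearing each row's end pixels sideways, then copying the finished top and bottom rows (margins included) outward. A simplified single-plane sketch under those assumptions, not the libvpx code itself:

#include <string.h>

/* Extend a w x h plane stored with the given pitch by `border` pixels on
 * every side using edge replication. dst points at pixel (0, 0) and the
 * allocation must leave `border` rows/columns of headroom around it. */
static void extend_plane_borders(unsigned char *dst, int pitch, int w, int h,
                                 int border) {
  int i, j;
  /* Left/right margins: replicate each row's first and last pixel. */
  for (i = 0; i < h; ++i) {
    unsigned char *row = dst + i * pitch;
    memset(row - border, row[0], border);
    memset(row + w, row[w - 1], border);
  }
  /* Top/bottom margins: copy the finished edge rows, margins included. */
  for (j = 1; j <= border; ++j) {
    memcpy(dst - j * pitch - border, dst - border, w + 2 * border);
    memcpy(dst + (h - 1 + j) * pitch - border,
           dst + (h - 1) * pitch - border, w + 2 * border);
  }
}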
diff --git a/vp10/encoder/extend.h b/vp10/encoder/extend.h
index 6f502ef..f7967fd 100644
--- a/vp10/encoder/extend.h
+++ b/vp10/encoder/extend.h
@@ -18,14 +18,12 @@
extern "C" {
#endif
-
void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst);
+ YV12_BUFFER_CONFIG *dst);
void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
- YV12_BUFFER_CONFIG *dst,
- int srcy, int srcx,
- int srch, int srcw);
+ YV12_BUFFER_CONFIG *dst, int srcy,
+ int srcx, int srch, int srcw);
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp10/encoder/firstpass.c b/vp10/encoder/firstpass.c
index 450c939..b5c728c 100644
--- a/vp10/encoder/firstpass.c
+++ b/vp10/encoder/firstpass.c
@@ -38,35 +38,34 @@
#include "vp10/encoder/rd.h"
#include "vpx_dsp/variance.h"
-#define OUTPUT_FPF 0
-#define ARF_STATS_OUTPUT 0
+#define OUTPUT_FPF 0
+#define ARF_STATS_OUTPUT 0
#define GROUP_ADAPTIVE_MAXQ 1
-#define BOOST_BREAKOUT 12.5
-#define BOOST_FACTOR 12.5
-#define ERR_DIVISOR 128.0
-#define FACTOR_PT_LOW 0.70
-#define FACTOR_PT_HIGH 0.90
-#define FIRST_PASS_Q 10.0
-#define GF_MAX_BOOST 96.0
-#define INTRA_MODE_PENALTY 1024
-#define KF_MAX_BOOST 128.0
-#define MIN_ARF_GF_BOOST 240
-#define MIN_DECAY_FACTOR 0.01
-#define MIN_KF_BOOST 300
+#define BOOST_BREAKOUT 12.5
+#define BOOST_FACTOR 12.5
+#define ERR_DIVISOR 128.0
+#define FACTOR_PT_LOW 0.70
+#define FACTOR_PT_HIGH 0.90
+#define FIRST_PASS_Q 10.0
+#define GF_MAX_BOOST 96.0
+#define INTRA_MODE_PENALTY 1024
+#define KF_MAX_BOOST 128.0
+#define MIN_ARF_GF_BOOST 240
+#define MIN_DECAY_FACTOR 0.01
+#define MIN_KF_BOOST 300
#define NEW_MV_MODE_PENALTY 32
-#define DARK_THRESH 64
-#define DEFAULT_GRP_WEIGHT 1.0
-#define RC_FACTOR_MIN 0.75
-#define RC_FACTOR_MAX 1.75
-
+#define DARK_THRESH 64
+#define DEFAULT_GRP_WEIGHT 1.0
+#define RC_FACTOR_MIN 0.75
+#define RC_FACTOR_MAX 1.75
#define NCOUNT_INTRA_THRESH 8192
#define NCOUNT_INTRA_FACTOR 3
#define NCOUNT_FRAME_II_THRESH 5.0
-#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
+#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x)-0.000001 : (x) + 0.000001)
#if ARF_STATS_OUTPUT
unsigned int arf_count = 0;
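DOUBLE_DIVIDE_CHECK, reformatted above, guards the many first-pass ratios whose denominator can be exactly zero: it nudges the value 1e-6 away from zero in the direction of its sign before the division. The same idea as a function:

#include <stdio.h>

/* Bias the denominator 1e-6 away from zero, preserving its sign, so the
 * division can never fault. */
static double safe_div(double num, double den) {
  den = (den < 0) ? den - 0.000001 : den + 0.000001;
  return num / den;
}

int main(void) {
  printf("%f\n", safe_div(1.0, 0.0)); /* ~1e6 rather than inf */
  return 0;
}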
@@ -74,8 +73,7 @@
// Resets the first pass file to the given position using a relative seek from
// the current position.
-static void reset_fpf_position(TWO_PASS *p,
- const FIRSTPASS_STATS *position) {
+static void reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) {
p->stats_in = position;
}
@@ -90,8 +88,7 @@
}
static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) {
- if (p->stats_in >= p->stats_in_end)
- return EOF;
+ if (p->stats_in >= p->stats_in_end) return EOF;
*fps = *p->stats_in;
++p->stats_in;
@@ -112,39 +109,24 @@
FILE *fpfile;
fpfile = fopen("firstpass.stt", "a");
- fprintf(fpfile, "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf"
+ fprintf(fpfile,
+ "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf"
"%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf"
"%12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf\n",
- stats->frame,
- stats->weight,
- stats->intra_error,
- stats->coded_error,
- stats->sr_coded_error,
- stats->pcnt_inter,
- stats->pcnt_motion,
- stats->pcnt_second_ref,
- stats->pcnt_neutral,
- stats->intra_skip_pct,
- stats->inactive_zone_rows,
- stats->inactive_zone_cols,
- stats->MVr,
- stats->mvr_abs,
- stats->MVc,
- stats->mvc_abs,
- stats->MVrv,
- stats->MVcv,
- stats->mv_in_out_count,
- stats->new_mv_count,
- stats->count,
- stats->duration);
+ stats->frame, stats->weight, stats->intra_error, stats->coded_error,
+ stats->sr_coded_error, stats->pcnt_inter, stats->pcnt_motion,
+ stats->pcnt_second_ref, stats->pcnt_neutral, stats->intra_skip_pct,
+ stats->inactive_zone_rows, stats->inactive_zone_cols, stats->MVr,
+ stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv,
+ stats->MVcv, stats->mv_in_out_count, stats->new_mv_count,
+ stats->count, stats->duration);
fclose(fpfile);
}
#endif
}
#if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats,
- VP10_COMMON *cm,
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
struct vpx_codec_pkt_list *pktlist) {
struct vpx_codec_cx_pkt pkt;
pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
@@ -160,23 +142,23 @@
section->intra_error = 0.0;
section->coded_error = 0.0;
section->sr_coded_error = 0.0;
- section->pcnt_inter = 0.0;
- section->pcnt_motion = 0.0;
+ section->pcnt_inter = 0.0;
+ section->pcnt_motion = 0.0;
section->pcnt_second_ref = 0.0;
section->pcnt_neutral = 0.0;
section->intra_skip_pct = 0.0;
section->inactive_zone_rows = 0.0;
section->inactive_zone_cols = 0.0;
section->MVr = 0.0;
- section->mvr_abs = 0.0;
- section->MVc = 0.0;
- section->mvc_abs = 0.0;
- section->MVrv = 0.0;
- section->MVcv = 0.0;
- section->mv_in_out_count = 0.0;
+ section->mvr_abs = 0.0;
+ section->MVc = 0.0;
+ section->mvc_abs = 0.0;
+ section->MVrv = 0.0;
+ section->MVcv = 0.0;
+ section->mv_in_out_count = 0.0;
section->new_mv_count = 0.0;
- section->count = 0.0;
- section->duration = 1.0;
+ section->count = 0.0;
+ section->duration = 1.0;
}
static void accumulate_stats(FIRSTPASS_STATS *section,
@@ -186,7 +168,7 @@
section->intra_error += frame->intra_error;
section->coded_error += frame->coded_error;
section->sr_coded_error += frame->sr_coded_error;
- section->pcnt_inter += frame->pcnt_inter;
+ section->pcnt_inter += frame->pcnt_inter;
section->pcnt_motion += frame->pcnt_motion;
section->pcnt_second_ref += frame->pcnt_second_ref;
section->pcnt_neutral += frame->pcnt_neutral;
@@ -194,15 +176,15 @@
section->inactive_zone_rows += frame->inactive_zone_rows;
section->inactive_zone_cols += frame->inactive_zone_cols;
section->MVr += frame->MVr;
- section->mvr_abs += frame->mvr_abs;
- section->MVc += frame->MVc;
- section->mvc_abs += frame->mvc_abs;
- section->MVrv += frame->MVrv;
- section->MVcv += frame->MVcv;
- section->mv_in_out_count += frame->mv_in_out_count;
+ section->mvr_abs += frame->mvr_abs;
+ section->MVc += frame->MVc;
+ section->mvc_abs += frame->mvc_abs;
+ section->MVrv += frame->MVrv;
+ section->MVcv += frame->MVcv;
+ section->mv_in_out_count += frame->mv_in_out_count;
section->new_mv_count += frame->new_mv_count;
- section->count += frame->count;
- section->duration += frame->duration;
+ section->count += frame->count;
+ section->duration += frame->duration;
}
static void subtract_stats(FIRSTPASS_STATS *section,
@@ -212,7 +194,7 @@
section->intra_error -= frame->intra_error;
section->coded_error -= frame->coded_error;
section->sr_coded_error -= frame->sr_coded_error;
- section->pcnt_inter -= frame->pcnt_inter;
+ section->pcnt_inter -= frame->pcnt_inter;
section->pcnt_motion -= frame->pcnt_motion;
section->pcnt_second_ref -= frame->pcnt_second_ref;
section->pcnt_neutral -= frame->pcnt_neutral;
@@ -220,15 +202,15 @@
section->inactive_zone_rows -= frame->inactive_zone_rows;
section->inactive_zone_cols -= frame->inactive_zone_cols;
section->MVr -= frame->MVr;
- section->mvr_abs -= frame->mvr_abs;
- section->MVc -= frame->MVc;
- section->mvc_abs -= frame->mvc_abs;
- section->MVrv -= frame->MVrv;
- section->MVcv -= frame->MVcv;
- section->mv_in_out_count -= frame->mv_in_out_count;
+ section->mvr_abs -= frame->mvr_abs;
+ section->MVc -= frame->MVc;
+ section->mvc_abs -= frame->mvc_abs;
+ section->MVrv -= frame->MVrv;
+ section->MVcv -= frame->MVcv;
+ section->mv_in_out_count -= frame->mv_in_out_count;
section->new_mv_count -= frame->new_mv_count;
- section->count -= frame->count;
- section->duration -= frame->duration;
+ section->count -= frame->count;
+ section->duration -= frame->duration;
}
// Calculate an active area of the image that discounts formatting
@@ -236,13 +218,13 @@
#define MIN_ACTIVE_AREA 0.5
#define MAX_ACTIVE_AREA 1.0
static double calculate_active_area(const VP10_COMP *cpi,
- const FIRSTPASS_STATS *this_frame)
-{
+ const FIRSTPASS_STATS *this_frame) {
double active_pct;
- active_pct = 1.0 -
- ((this_frame->intra_skip_pct / 2) +
- ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
+ active_pct =
+ 1.0 -
+ ((this_frame->intra_skip_pct / 2) +
+ ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows));
return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA);
}
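calculate_active_area reduces a frame to the fraction believed to carry real picture content: half the intra-skip percentage plus twice the inactive rows (normalized by mb_rows) comes off 1.0, clamped to [MIN_ACTIVE_AREA, MAX_ACTIVE_AREA]. A quick numeric check of that formula:

#include <stdio.h>

static double fclamp(double v, double lo, double hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

/* Mirrors the deduction above: halve the intra-skip share, double-weight
 * the inactive rows, clamp to the documented [0.5, 1.0] range. */
static double active_area(double intra_skip_pct, double inactive_zone_rows,
                          double mb_rows) {
  const double pct =
      1.0 - ((intra_skip_pct / 2) + ((inactive_zone_rows * 2) / mb_rows));
  return fclamp(pct, 0.5, 1.0); /* MIN_ACTIVE_AREA .. MAX_ACTIVE_AREA */
}

int main(void) {
  /* e.g. letterboxing: 8 inactive MB rows out of 68 -> ~0.76 active. */
  printf("%f\n", active_area(0.0, 8.0, 68.0));
  return 0;
}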
@@ -257,8 +239,9 @@
const double av_weight = stats->weight / stats->count;
const double av_err = (stats->coded_error * av_weight) / stats->count;
double modified_error =
- av_err * pow(this_frame->coded_error * this_frame->weight /
- DOUBLE_DIVIDE_CHECK(av_err), oxcf->two_pass_vbrbias / 100.0);
+ av_err * pow(this_frame->coded_error * this_frame->weight /
+ DOUBLE_DIVIDE_CHECK(av_err),
+ oxcf->two_pass_vbrbias / 100.0);
// Correction for active area. Frames with a reduced active area
// (eg due to formatting bars) have a higher error per mb for the
@@ -266,17 +249,18 @@
// 0.5N blocks of complexity 2X is a little easier than coding N
// blocks of complexity X.
modified_error *=
- pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
+ pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION);
- return fclamp(modified_error,
- twopass->modified_error_min, twopass->modified_error_max);
+ return fclamp(modified_error, twopass->modified_error_min,
+ twopass->modified_error_max);
}
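The bias term in calculate_modified_err is a power law: a two_pass_vbrbias of 100 leaves per-frame errors untouched, while smaller values pull them toward the section average and flatten the bit allocation. A simplified sketch without the active-area and clamping steps:

#include <math.h>
#include <stdio.h>

/* Simplified power-law reweighting; assumes av_err > 0 and omits the
 * active-area correction and min/max clamp applied by the real function. */
static double modified_err_sketch(double frame_err, double av_err,
                                  int vbrbias) {
  return av_err * pow(frame_err / av_err, vbrbias / 100.0);
}

int main(void) {
  /* With bias 50, a frame at 4x the average error gets only 2x weight. */
  printf("%f\n", modified_err_sketch(400.0, 100.0, 50.0)); /* 200.0 */
  return 0;
}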
// This function returns the maximum target rate per frame.
static int frame_max_bits(const RATE_CONTROL *rc,
const VP10EncoderConfig *oxcf) {
int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
- (int64_t)oxcf->two_pass_vbrmax_section) / 100;
+ (int64_t)oxcf->two_pass_vbrmax_section) /
+ 100;
if (max_bits < 0)
max_bits = 0;
else if (max_bits > rc->max_frame_bandwidth)
@@ -295,14 +279,10 @@
static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) {
switch (bsize) {
- case BLOCK_8X8:
- return vpx_mse8x8;
- case BLOCK_16X8:
- return vpx_mse16x8;
- case BLOCK_8X16:
- return vpx_mse8x16;
- default:
- return vpx_mse16x16;
+ case BLOCK_8X8: return vpx_mse8x8;
+ case BLOCK_16X8: return vpx_mse16x8;
+ case BLOCK_8X16: return vpx_mse8x16;
+ default: return vpx_mse16x16;
}
}
@@ -321,38 +301,26 @@
switch (bd) {
default:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_8_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_8_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_8_mse8x16;
- default:
- return vpx_highbd_8_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_8_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_8_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_8_mse8x16;
+ default: return vpx_highbd_8_mse16x16;
}
break;
case 10:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_10_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_10_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_10_mse8x16;
- default:
- return vpx_highbd_10_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_10_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_10_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_10_mse8x16;
+ default: return vpx_highbd_10_mse16x16;
}
break;
case 12:
switch (bsize) {
- case BLOCK_8X8:
- return vpx_highbd_12_mse8x8;
- case BLOCK_16X8:
- return vpx_highbd_12_mse16x8;
- case BLOCK_8X16:
- return vpx_highbd_12_mse8x16;
- default:
- return vpx_highbd_12_mse16x16;
+ case BLOCK_8X8: return vpx_highbd_12_mse8x8;
+ case BLOCK_16X8: return vpx_highbd_12_mse16x8;
+ case BLOCK_8X16: return vpx_highbd_12_mse8x16;
+ default: return vpx_highbd_12_mse16x16;
}
break;
}
@@ -375,8 +343,7 @@
int sr = 0;
const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
- while ((dim << sr) < MAX_FULL_PEL_VAL)
- ++sr;
+ while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr;
return sr;
}
@@ -384,8 +351,8 @@
const MV *ref_mv, MV *best_mv,
int *best_motion_err) {
MACROBLOCKD *const xd = &x->e_mbd;
- MV tmp_mv = {0, 0};
- MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3};
+ MV tmp_mv = { 0, 0 };
+ MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 };
int num00, tmp_err, n;
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
vpx_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize];
@@ -407,12 +374,11 @@
// Center the initial step/diamond search on best mv.
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
- step_param,
- x->sadperbit16, &num00, &v_fn_ptr, ref_mv);
+ step_param, x->sadperbit16, &num00,
+ &v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
- if (tmp_err < INT_MAX - new_mv_mode_penalty)
- tmp_err += new_mv_mode_penalty;
+ if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
if (tmp_err < *best_motion_err) {
*best_motion_err = tmp_err;
@@ -430,8 +396,8 @@
--num00;
} else {
tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
- step_param + n, x->sadperbit16,
- &num00, &v_fn_ptr, ref_mv);
+ step_param + n, x->sadperbit16, &num00,
+ &v_fn_ptr, ref_mv);
if (tmp_err < INT_MAX)
tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
if (tmp_err < INT_MAX - new_mv_mode_penalty)
@@ -447,11 +413,9 @@
static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
if (2 * mb_col + 1 < cm->mi_cols) {
- return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16
- : BLOCK_16X8;
+ return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
} else {
- return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16
- : BLOCK_8X8;
+ return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 : BLOCK_8X8;
}
}
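get_bsize, above, picks the first-pass block size from position alone: mi units are 8 pixels, so 2 * mb + 1 < mi_rows/cols means a full 16-pixel macroblock fits in that dimension, and the size shrinks to 16x8, 8x16 or 8x8 along the bottom edge, right edge and corner. The same decision table in isolation:

/* Position-only block size choice matching the logic above. */
typedef enum { B8X8, B8X16, B16X8, B16X16 } Bsize;

static Bsize first_pass_bsize(int mb_row, int mb_col, int mi_rows,
                              int mi_cols) {
  const int full_width = 2 * mb_col + 1 < mi_cols;
  const int full_height = 2 * mb_row + 1 < mi_rows;
  if (full_width) return full_height ? B16X16 : B16X8; /* interior/bottom */
  return full_height ? B8X16 : B8X8;                   /* right/corner */
}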
@@ -459,11 +423,9 @@
int i;
for (i = 0; i < QINDEX_RANGE; ++i)
- if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q)
- break;
+ if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
- if (i == QINDEX_RANGE)
- i--;
+ if (i == QINDEX_RANGE) i--;
return i;
}
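The function this hunk reformats (find_fp_qindex) is a linear scan for the first qindex whose real Q reaches FIRST_PASS_Q, falling back to the last table entry. Generalized, with a function pointer standing in for vp10_convert_qindex_to_q:

/* First index whose mapped Q reaches `target`, else the last index. */
static int find_qindex(double target, double (*qindex_to_q)(int), int range) {
  int i;
  for (i = 0; i < range; ++i)
    if (qindex_to_q(i) >= target) break;
  if (i == range) --i;
  return i;
}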
@@ -471,8 +433,7 @@
static void set_first_pass_params(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
if (!cpi->refresh_alt_ref_frame &&
- (cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY))) {
+ (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
cm->frame_type = KEY_FRAME;
} else {
cm->frame_type = INTER_FRAME;
@@ -511,9 +472,9 @@
int image_data_start_row = INVALID_ROW;
int new_mv_count = 0;
int sum_in_vectors = 0;
- MV lastmv = {0, 0};
+ MV lastmv = { 0, 0 };
TWO_PASS *twopass = &cpi->twopass;
- const MV zero_mv = {0, 0};
+ const MV zero_mv = { 0, 0 };
int recon_y_stride, recon_uv_stride, uv_mb_height;
YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
@@ -576,7 +537,7 @@
uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
- MV best_ref_mv = {0, 0};
+ MV best_ref_mv = { 0, 0 };
// Reset above block coeffs.
xd->up_available = (mb_row != 0);
@@ -586,8 +547,7 @@
// Set up limit values for motion vectors to prevent them extending
// outside the UMV borders.
x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16);
- x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
- + BORDER_MV_PIXELS_B16;
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16;
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int this_error;
@@ -608,16 +568,15 @@
xd->left_available = (mb_col != 0);
xd->mi[0]->mbmi.sb_type = bsize;
xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
- set_mi_row_col(xd, &tile,
- mb_row << 1, num_8x8_blocks_high_lookup[bsize],
+ set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize],
mb_col << 1, num_8x8_blocks_wide_lookup[bsize],
cm->mi_rows, cm->mi_cols);
// Do intra 16x16 prediction.
xd->mi[0]->mbmi.segment_id = 0;
xd->mi[0]->mbmi.mode = DC_PRED;
- xd->mi[0]->mbmi.tx_size = use_dc_pred ?
- (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
+ xd->mi[0]->mbmi.tx_size =
+ use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
vp10_encode_intra_block_plane(x, bsize, 0);
this_error = vpx_get_mb_ss(x->plane[0].src_diff);
@@ -635,17 +594,13 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (cm->use_highbitdepth) {
switch (cm->bit_depth) {
- case VPX_BITS_8:
- break;
- case VPX_BITS_10:
- this_error >>= 4;
- break;
- case VPX_BITS_12:
- this_error >>= 8;
- break;
+ case VPX_BITS_8: break;
+ case VPX_BITS_10: this_error >>= 4; break;
+ case VPX_BITS_12: this_error >>= 8; break;
default:
- assert(0 && "cm->bit_depth should be VPX_BITS_8, "
- "VPX_BITS_10 or VPX_BITS_12");
+ assert(0 &&
+ "cm->bit_depth should be VPX_BITS_8, "
+ "VPX_BITS_10 or VPX_BITS_12");
return;
}
}
@@ -699,7 +654,7 @@
if (cm->current_video_frame > 0) {
int tmp_err, motion_error, raw_motion_error;
// Assume 0,0 motion with no mv overhead.
- MV mv = {0, 0} , tmp_mv = {0, 0};
+ MV mv = { 0, 0 }, tmp_mv = { 0, 0 };
struct buf_2d unscaled_last_source_buf_2d;
xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
@@ -708,12 +663,12 @@
motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
} else {
- motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
}
#else
- motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ motion_error =
+ get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
#endif // CONFIG_VPX_HIGHBITDEPTH
// Compute the motion error of the 0,0 motion using the last source
@@ -728,12 +683,12 @@
raw_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
} else {
- raw_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+ raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &unscaled_last_source_buf_2d);
}
#else
- raw_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &unscaled_last_source_buf_2d);
+ raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &unscaled_last_source_buf_2d);
#endif // CONFIG_VPX_HIGHBITDEPTH
// TODO(pengchong): Replace the hard-coded threshold
@@ -765,12 +720,12 @@
gf_motion_error = highbd_get_prediction_error(
bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
} else {
- gf_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
}
#else
- gf_motion_error = get_prediction_error(
- bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
+ gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
+ &xd->plane[0].pre[0]);
#endif // CONFIG_VPX_HIGHBITDEPTH
first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
@@ -826,12 +781,12 @@
if (((this_error - intrapenalty) * 9 <= motion_error * 10) &&
(this_error < (2 * intrapenalty))) {
neutral_count += 1.0;
- // Also track cases where the intra is not much worse than the inter
- // and use this in limiting the GF/arf group length.
+ // Also track cases where the intra is not much worse than the inter
+ // and use this in limiting the GF/arf group length.
} else if ((this_error > NCOUNT_INTRA_THRESH) &&
(this_error < (NCOUNT_INTRA_FACTOR * motion_error))) {
- neutral_count += (double)motion_error /
- DOUBLE_DIVIDE_CHECK((double)this_error);
+ neutral_count +=
+ (double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error);
}
mv.row *= 8;
@@ -901,8 +856,7 @@
#endif
// Non-zero vector, was it different from the last non zero vector?
- if (!is_equal_mv(&mv, &lastmv))
- ++new_mv_count;
+ if (!is_equal_mv(&mv, &lastmv)) ++new_mv_count;
lastmv = mv;
// Does the row vector point inwards or outwards?
@@ -948,10 +902,10 @@
// Adjust to the next row of MBs.
x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols;
- x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride -
- uv_mb_height * cm->mb_cols;
- x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride -
- uv_mb_height * cm->mb_cols;
+ x->plane[1].src.buf +=
+ uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
+ x->plane[2].src.buf +=
+ uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols;
vpx_clear_system_state();
}
@@ -976,7 +930,8 @@
// Initial estimate here uses sqrt(mbs) to define the min_err, where the
// number of mbs is proportional to the image area.
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
const double min_err = 200 * sqrt(num_mbs);
intra_factor = intra_factor / (double)num_mbs;
@@ -1000,10 +955,10 @@
fps.mvr_abs = (double)sum_mvr_abs / mvcount;
fps.MVc = (double)sum_mvc / mvcount;
fps.mvc_abs = (double)sum_mvc_abs / mvcount;
- fps.MVrv = ((double)sum_mvrs -
- ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
- fps.MVcv = ((double)sum_mvcs -
- ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
+ fps.MVrv =
+ ((double)sum_mvrs - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount;
+ fps.MVcv =
+ ((double)sum_mvcs - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount;
fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2);
fps.new_mv_count = new_mv_count;
fps.pcnt_motion = (double)mvcount / num_mbs;
@@ -1084,11 +1039,8 @@
++cm->current_video_frame;
}
-static double calc_correction_factor(double err_per_mb,
- double err_divisor,
- double pt_low,
- double pt_high,
- int q,
+static double calc_correction_factor(double err_per_mb, double err_divisor,
+ double pt_low, double pt_high, int q,
vpx_bit_depth_t bit_depth) {
const double error_term = err_per_mb / err_divisor;
@@ -1097,8 +1049,7 @@
VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
// Calculate correction factor.
- if (power_term < 1.0)
- assert(error_term >= 0.0);
+ if (power_term < 1.0) assert(error_term >= 0.0);
return fclamp(pow(error_term, power_term), 0.05, 5.0);
}
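calc_correction_factor normalizes error-per-MB by a divisor and raises it to a Q-dependent power held between pt_low and pt_high, clamping the result to [0.05, 5.0]. A standalone restatement, where real_q stands in for vp10_convert_qindex_to_q(q, bit_depth):

#include <math.h>

static double fclamp(double v, double lo, double hi) {
  return v < lo ? lo : (v > hi ? hi : v);
}

/* The power term rises with Q between pt_low and pt_high; the factor is
 * held to [0.05, 5.0] exactly as above. */
static double correction_factor(double err_per_mb, double err_divisor,
                                double pt_low, double pt_high,
                                double real_q) {
  const double error_term = err_per_mb / err_divisor;
  const double power_term = fmin(real_q * 0.01 + pt_low, pt_high);
  return fclamp(pow(error_term, power_term), 0.05, 5.0);
}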
@@ -1122,35 +1073,31 @@
return rc->worst_quality; // Highest value allowed
} else {
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone));
const double av_err_per_mb = section_err / active_mbs;
const double speed_term = 1.0 + 0.04 * oxcf->speed;
const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR;
- const int target_norm_bits_per_mb = ((uint64_t)section_target_bandwidth <<
- BPER_MB_NORMBITS) / active_mbs;
+ const int target_norm_bits_per_mb =
+ ((uint64_t)section_target_bandwidth << BPER_MB_NORMBITS) / active_mbs;
int q;
// Try and pick a max Q that will be high enough to encode the
// content at the given rate.
for (q = rc->best_quality; q < rc->worst_quality; ++q) {
- const double factor =
- calc_correction_factor(av_err_per_mb,
- ERR_DIVISOR - ediv_size_correction,
- FACTOR_PT_LOW, FACTOR_PT_HIGH, q,
- cpi->common.bit_depth);
- const int bits_per_mb =
- vp10_rc_bits_per_mb(INTER_FRAME, q,
- factor * speed_term * group_weight_factor,
- cpi->common.bit_depth);
- if (bits_per_mb <= target_norm_bits_per_mb)
- break;
+ const double factor = calc_correction_factor(
+ av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
+ FACTOR_PT_HIGH, q, cpi->common.bit_depth);
+ const int bits_per_mb = vp10_rc_bits_per_mb(
+ INTER_FRAME, q, factor * speed_term * group_weight_factor,
+ cpi->common.bit_depth);
+ if (bits_per_mb <= target_norm_bits_per_mb) break;
}
// Restriction on active max q for constrained quality mode.
- if (cpi->oxcf.rc_mode == VPX_CQ)
- q = VPXMAX(q, oxcf->cq_level);
+ if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level);
return q;
}
}
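The loop reformatted above is the heart of get_twopass_worst_quality: walk Q from the best (lowest) allowed value toward the worst until the predicted bits per MB fit the per-MB budget. Its shape, abstracted over the rate model:

/* bits_per_mb_at stands in for vp10_rc_bits_per_mb plus its correction
 * factors. Returns worst_q if nothing fits. */
static int pick_max_q(int best_q, int worst_q, int target_bits_per_mb,
                      int (*bits_per_mb_at)(int q)) {
  int q;
  for (q = best_q; q < worst_q; ++q)
    if (bits_per_mb_at(q) <= target_bits_per_mb) break;
  return q;
}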
@@ -1180,9 +1127,8 @@
setup_rf_level_maxq(cpi);
}
-void vp10_calculate_coded_size(VP10_COMP *cpi,
- int *scaled_frame_width,
- int *scaled_frame_height) {
+void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
+ int *scaled_frame_height) {
RATE_CONTROL *const rc = &cpi->rc;
*scaled_frame_width = rc->frame_width[rc->frame_size_selector];
*scaled_frame_height = rc->frame_height[rc->frame_size_selector];
@@ -1197,8 +1143,7 @@
zero_stats(&twopass->total_stats);
zero_stats(&twopass->total_left_stats);
- if (!twopass->stats_in_end)
- return;
+ if (!twopass->stats_in_end) return;
stats = &twopass->total_stats;
@@ -1212,8 +1157,8 @@
// It is calculated based on the actual durations of all frames from the
// first pass.
vp10_new_framerate(cpi, frame_rate);
- twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth /
- 10000000.0);
+ twopass->bits_left =
+ (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
// This variable monitors how far behind the second ref update is lagging.
twopass->sr_update_lag = 1;
@@ -1221,14 +1166,14 @@
// Scan the first pass file and calculate a modified total error based upon
// the bias/power function used to allocate bits.
{
- const double avg_error = stats->coded_error /
- DOUBLE_DIVIDE_CHECK(stats->count);
+ const double avg_error =
+ stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count);
const FIRSTPASS_STATS *s = twopass->stats_in;
double modified_error_total = 0.0;
- twopass->modified_error_min = (avg_error *
- oxcf->two_pass_vbrmin_section) / 100;
- twopass->modified_error_max = (avg_error *
- oxcf->two_pass_vbrmax_section) / 100;
+ twopass->modified_error_min =
+ (avg_error * oxcf->two_pass_vbrmin_section) / 100;
+ twopass->modified_error_max =
+ (avg_error * oxcf->two_pass_vbrmax_section) / 100;
while (s < twopass->stats_in_end) {
modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s);
++s;
@@ -1260,15 +1205,14 @@
static double get_sr_decay_rate(const VP10_COMP *cpi,
const FIRSTPASS_STATS *frame) {
- const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
- double sr_diff =
- (frame->sr_coded_error - frame->coded_error) / num_mbs;
+ const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+ : cpi->common.MBs;
+ double sr_diff = (frame->sr_coded_error - frame->coded_error) / num_mbs;
double sr_decay = 1.0;
double modified_pct_inter;
double modified_pcnt_intra;
const double motion_amplitude_factor =
- frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2);
+ frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2);
modified_pct_inter = frame->pcnt_inter;
if ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) <
@@ -1277,7 +1221,6 @@
}
modified_pcnt_intra = 100 * (1.0 - modified_pct_inter);
-
if ((sr_diff > LOW_SR_DIFF_TRHESH)) {
sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX);
sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) -
@@ -1291,8 +1234,7 @@
// quality is decaying from frame to frame.
static double get_zero_motion_factor(const VP10_COMP *cpi,
const FIRSTPASS_STATS *frame) {
- const double zero_motion_pct = frame->pcnt_inter -
- frame->pcnt_motion;
+ const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
double sr_decay = get_sr_decay_rate(cpi, frame);
return VPXMIN(sr_decay, zero_motion_pct);
}
@@ -1303,8 +1245,8 @@
const FIRSTPASS_STATS *next_frame) {
const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
const double zero_motion_factor =
- (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
- ZM_POWER_FACTOR));
+ (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion),
+ ZM_POWER_FACTOR));
return VPXMAX(zero_motion_factor,
(sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor)));
@@ -1313,8 +1255,8 @@
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi,
- int frame_interval, int still_interval,
+static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+ int still_interval,
double loop_decay_rate,
double last_decay_rate) {
TWO_PASS *const twopass = &cpi->twopass;
@@ -1323,19 +1265,16 @@
// Break clause to detect very still sections after motion
// For example a static image after a fade or other transition
// instead of a clean scene cut.
- if (frame_interval > rc->min_gf_interval &&
- loop_decay_rate >= 0.999 &&
+ if (frame_interval > rc->min_gf_interval && loop_decay_rate >= 0.999 &&
last_decay_rate < 0.9) {
int j;
// Look ahead a few frames to see if static condition persists...
for (j = 0; j < still_interval; ++j) {
const FIRSTPASS_STATS *stats = &twopass->stats_in[j];
- if (stats >= twopass->stats_in_end)
- break;
+ if (stats >= twopass->stats_in_end) break;
- if (stats->pcnt_inter - stats->pcnt_motion < 0.999)
- break;
+ if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break;
}
// Only if it does do we signal a transition to still.
@@ -1377,30 +1316,28 @@
// Accumulate a measure of how uniform (or conversely how random) the motion
// field is (a ratio of abs(mv) / mv).
if (pct > 0.05) {
- const double mvr_ratio = fabs(stats->mvr_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
- const double mvc_ratio = fabs(stats->mvc_abs) /
- DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
+ const double mvr_ratio =
+ fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr));
+ const double mvc_ratio =
+ fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc));
- *mv_ratio_accumulator += pct * (mvr_ratio < stats->mvr_abs ?
- mvr_ratio : stats->mvr_abs);
- *mv_ratio_accumulator += pct * (mvc_ratio < stats->mvc_abs ?
- mvc_ratio : stats->mvc_abs);
+ *mv_ratio_accumulator +=
+ pct * (mvr_ratio < stats->mvr_abs ? mvr_ratio : stats->mvr_abs);
+ *mv_ratio_accumulator +=
+ pct * (mvc_ratio < stats->mvc_abs ? mvc_ratio : stats->mvc_abs);
}
}
#define BASELINE_ERR_PER_MB 1000.0
static double calc_frame_boost(VP10_COMP *cpi,
const FIRSTPASS_STATS *this_frame,
- double this_frame_mv_in_out,
- double max_boost) {
+ double this_frame_mv_in_out, double max_boost) {
double frame_boost;
- const double lq =
- vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
- cpi->common.bit_depth);
+ const double lq = vp10_convert_qindex_to_q(
+ cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
- int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
+ : cpi->common.MBs;
// Correct for any inactive region in the image
num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame));
@@ -1422,9 +1359,8 @@
return VPXMIN(frame_boost, max_boost * boost_q_correction);
}
-static int calc_arf_boost(VP10_COMP *cpi, int offset,
- int f_frames, int b_frames,
- int *f_boost, int *b_boost) {
+static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
+ int b_frames, int *f_boost, int *b_boost) {
TWO_PASS *const twopass = &cpi->twopass;
int i;
double boost_score = 0.0;
@@ -1439,14 +1375,12 @@
// Search forward from the proposed arf/next gf position.
for (i = 0; i < f_frames; ++i) {
const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
- if (this_frame == NULL)
- break;
+ if (this_frame == NULL) break;
// Update the motion related elements to the boost calculation.
- accumulate_frame_motion_stats(this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// We want to discount the flash frame itself and the recovery
// frame that follows as both will have poor scores.
@@ -1457,12 +1391,13 @@
if (!flash_detected) {
decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
- ? MIN_DECAY_FACTOR : decay_accumulator;
+ ? MIN_DECAY_FACTOR
+ : decay_accumulator;
}
- boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
}
*f_boost = (int)boost_score;
@@ -1478,14 +1413,12 @@
// Search backward towards last gf position.
for (i = -1; i >= -b_frames; --i) {
const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset);
- if (this_frame == NULL)
- break;
+ if (this_frame == NULL) break;
// Update the motion related elements to the boost calculation.
- accumulate_frame_motion_stats(this_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// We want to discount the flash frame itself and the recovery
// frame that follows as both will have poor scores.
@@ -1496,12 +1429,13 @@
if (!flash_detected) {
decay_accumulator *= get_prediction_decay_rate(cpi, this_frame);
decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR
- ? MIN_DECAY_FACTOR : decay_accumulator;
+ ? MIN_DECAY_FACTOR
+ : decay_accumulator;
}
- boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST);
}
*b_boost = (int)boost_score;
@@ -1549,9 +1483,10 @@
}
// Clamp odd edge cases.
- total_group_bits = (total_group_bits < 0) ?
- 0 : (total_group_bits > twopass->kf_group_bits) ?
- twopass->kf_group_bits : total_group_bits;
+ total_group_bits =
+ (total_group_bits < 0) ? 0 : (total_group_bits > twopass->kf_group_bits)
+ ? twopass->kf_group_bits
+ : total_group_bits;
// Clip based on user supplied data rate variability limit.
if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval)
@@ -1561,13 +1496,12 @@
}
// Calculate the number bits extra to assign to boosted frames in a group.
-static int calculate_boost_bits(int frame_count,
- int boost, int64_t total_group_bits) {
+static int calculate_boost_bits(int frame_count, int boost,
+ int64_t total_group_bits) {
int allocation_chunks;
// return 0 for invalid inputs (could arise e.g. through rounding errors)
- if (!boost || (total_group_bits <= 0) || (frame_count <= 0) )
- return 0;
+ if (!boost || (total_group_bits <= 0) || (frame_count <= 0)) return 0;
allocation_chunks = (frame_count * 100) + boost;
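The chunk arithmetic visible here implies the usual libvpx split, though the rest of the function sits outside this hunk: each frame weighs 100 chunks, the boost adds that many more on top, and the boosted frame's extra bits are its boost share of the group total. A plausible completion under that assumption:

#include <stdint.h>

/* Hypothetical completion: the boosted frame's extra allocation is its
 * boost share of the group total. The guard matches the line above. */
static int boost_bits_sketch(int frame_count, int boost,
                             int64_t total_group_bits) {
  int allocation_chunks;
  if (!boost || total_group_bits <= 0 || frame_count <= 0) return 0;
  allocation_chunks = (frame_count * 100) + boost;
  return (int)(total_group_bits * boost / allocation_chunks);
}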
@@ -1637,14 +1571,12 @@
}
// Step over the golden frame / overlay frame
- if (EOF == input_stats(twopass, &frame_stats))
- return;
+ if (EOF == input_stats(twopass, &frame_stats)) return;
}
// Deduct the boost bits for arf (or gf if it is not a key frame)
// from the group total.
- if (rc->source_alt_ref_pending || !key_frame)
- total_group_bits -= gf_arf_bits;
+ if (rc->source_alt_ref_pending || !key_frame) total_group_bits -= gf_arf_bits;
// Store the bits to spend on the ARF if there is one.
if (rc->source_alt_ref_pending) {
@@ -1657,8 +1589,8 @@
gf_group->arf_update_idx[alt_frame_index] = arf_buffer_indices[0];
gf_group->arf_ref_idx[alt_frame_index] =
- arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
- rc->source_alt_ref_active];
+ arf_buffer_indices[cpi->multi_arf_last_grp_enabled &&
+ rc->source_alt_ref_active];
++frame_index;
if (cpi->multi_arf_enabled) {
@@ -1666,7 +1598,7 @@
gf_group->update_type[frame_index] = ARF_UPDATE;
gf_group->rf_level[frame_index] = GF_ARF_LOW;
gf_group->arf_src_offset[frame_index] =
- (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
+ (unsigned char)((rc->baseline_gf_interval >> 1) - 1);
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
++frame_index;
@@ -1679,8 +1611,7 @@
// Allocate bits to the other frames in the group.
for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) {
int arf_idx = 0;
- if (EOF == input_stats(twopass, &frame_stats))
- break;
+ if (EOF == input_stats(twopass, &frame_stats)) break;
modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats);
@@ -1695,14 +1626,13 @@
mid_boost_bits += (target_frame_size >> 4);
target_frame_size -= (target_frame_size >> 4);
- if (frame_index <= mid_frame_idx)
- arf_idx = 1;
+ if (frame_index <= mid_frame_idx) arf_idx = 1;
}
gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx];
gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx];
- target_frame_size = clamp(target_frame_size, 0,
- VPXMIN(max_bits, (int)total_group_bits));
+ target_frame_size =
+ clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits));
gf_group->update_type[frame_index] = LF_UPDATE;
gf_group->rf_level[frame_index] = INTER_NORMAL;
@@ -1817,12 +1747,10 @@
// Set a maximum and minimum interval for the GF group.
// If the image appears almost completely static we can extend beyond this.
{
- int int_max_q =
- (int)(vp10_convert_qindex_to_q(twopass->active_worst_quality,
- cpi->common.bit_depth));
- int int_lbq =
- (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
- cpi->common.bit_depth));
+ int int_max_q = (int)(vp10_convert_qindex_to_q(
+ twopass->active_worst_quality, cpi->common.bit_depth));
+ int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
+ cpi->common.bit_depth));
active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
if (active_min_gf_interval > rc->max_gf_interval)
active_min_gf_interval = rc->max_gf_interval;
@@ -1858,18 +1786,16 @@
gf_group_skip_pct += this_frame->intra_skip_pct;
gf_group_inactive_zone_rows += this_frame->inactive_zone_rows;
- if (EOF == input_stats(twopass, &next_frame))
- break;
+ if (EOF == input_stats(twopass, &next_frame)) break;
// Test for the case where there is a brief flash but the prediction
// quality back to an earlier frame is then restored.
flash_detected = detect_flash(twopass, 0);
// Update the motion related elements to the boost calculation.
- accumulate_frame_motion_stats(&next_frame,
- &this_frame_mv_in_out, &mv_in_out_accumulator,
- &abs_mv_in_out_accumulator,
- &mv_ratio_accumulator);
+ accumulate_frame_motion_stats(
+ &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator,
+ &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
// Accumulate the effect of prediction quality decay.
if (!flash_detected) {
@@ -1892,23 +1818,23 @@
}
// Calculate a boost number for this frame.
- boost_score += decay_accumulator * calc_frame_boost(cpi, &next_frame,
- this_frame_mv_in_out,
- GF_MAX_BOOST);
+ boost_score +=
+ decay_accumulator *
+ calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out, GF_MAX_BOOST);
// Break out conditions.
if (
- // Break at active_max_gf_interval unless almost totally static.
- (i >= (active_max_gf_interval + arf_active_or_kf) &&
- zero_motion_accumulator < 0.995) ||
- (
- // Don't break out with a very short interval.
- (i >= active_min_gf_interval + arf_active_or_kf) &&
- (!flash_detected) &&
- ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
- (abs_mv_in_out_accumulator > 3.0) ||
- (mv_in_out_accumulator < -2.0) ||
- ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
+ // Break at active_max_gf_interval unless almost totally static.
+ (i >= (active_max_gf_interval + arf_active_or_kf) &&
+ zero_motion_accumulator < 0.995) ||
+ (
+ // Don't break out with a very short interval.
+ (i >= active_min_gf_interval + arf_active_or_kf) &&
+ (!flash_detected) &&
+ ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) ||
+ (abs_mv_in_out_accumulator > 3.0) ||
+ (mv_in_out_accumulator < -2.0) ||
+ ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) {
boost_score = old_boost_score;
break;
}
@@ -1923,18 +1849,19 @@
rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
// Should we use the alternate reference frame.
- if (allow_alt_ref &&
- (i < cpi->oxcf.lag_in_frames) &&
- (i >= rc->min_gf_interval)) {
+ if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) &&
+ (i >= rc->min_gf_interval)) {
// Calculate the boost for alt ref.
- rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost,
- &b_boost);
+ rc->gfu_boost =
+ calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost);
rc->source_alt_ref_pending = 1;
// Test to see if multi arf is appropriate.
cpi->multi_arf_enabled =
- (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
- (zero_motion_accumulator < 0.995)) ? 1 : 0;
+ (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) &&
+ (zero_motion_accumulator < 0.995))
+ ? 1
+ : 0;
} else {
rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST);
rc->source_alt_ref_pending = 0;
@@ -1959,13 +1886,13 @@
// of the allocated bit budget.
if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
const int vbr_group_bits_per_frame =
- (int)(gf_group_bits / rc->baseline_gf_interval);
- const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
+ (int)(gf_group_bits / rc->baseline_gf_interval);
+ const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
const double group_av_skip_pct =
- gf_group_skip_pct / rc->baseline_gf_interval;
+ gf_group_skip_pct / rc->baseline_gf_interval;
const double group_av_inactive_zone =
- ((gf_group_inactive_zone_rows * 2) /
- (rc->baseline_gf_interval * (double)cm->mb_rows));
+ ((gf_group_inactive_zone_rows * 2) /
+ (rc->baseline_gf_interval * (double)cm->mb_rows));
int tmp_q;
// rc factor is a weight factor that corrects for local rate control drift.
@@ -1977,19 +1904,17 @@
rc_factor = VPXMIN(RC_FACTOR_MAX,
(double)(100 - rc->rate_error_estimate) / 100.0);
}
- tmp_q =
- get_twopass_worst_quality(cpi, group_av_err,
- (group_av_skip_pct + group_av_inactive_zone),
- vbr_group_bits_per_frame,
- twopass->kfgroup_inter_fraction * rc_factor);
+ tmp_q = get_twopass_worst_quality(
+ cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone),
+ vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor);
twopass->active_worst_quality =
- VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
+ VPXMAX(tmp_q, twopass->active_worst_quality >> 1);
}
#endif
// Calculate the extra bits to be used for boosted frame(s)
- gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval,
- rc->gfu_boost, gf_group_bits);
+ gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, rc->gfu_boost,
+ gf_group_bits);
// Adjust KF group bits and error remaining.
twopass->kf_group_error_left -= (int64_t)gf_group_err;
@@ -2016,9 +1941,8 @@
// Calculate a section intra ratio used in setting max loop filter.
if (cpi->common.frame_type != KEY_FRAME) {
- twopass->section_intra_rating =
- calculate_section_intra_ratio(start_pos, twopass->stats_in_end,
- rc->baseline_gf_interval);
+ twopass->section_intra_rating = calculate_section_intra_ratio(
+ start_pos, twopass->stats_in_end, rc->baseline_gf_interval);
}
if (oxcf->resize_mode == RESIZE_DYNAMIC) {
@@ -2060,7 +1984,7 @@
int is_viable_kf = 0;
double pcnt_intra = 1.0 - this_frame->pcnt_inter;
double modified_pcnt_inter =
- this_frame->pcnt_inter - this_frame->pcnt_neutral;
+ this_frame->pcnt_inter - this_frame->pcnt_neutral;
// Does the frame satisfy the primary criteria of a key frame?
// See above for an explanation of the test criteria.
@@ -2072,15 +1996,15 @@
(pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) &&
((this_frame->intra_error /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) <
- KF_II_ERR_THRESHOLD) &&
+ KF_II_ERR_THRESHOLD) &&
((fabs(last_frame->coded_error - this_frame->coded_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
+ DOUBLE_DIVIDE_CHECK(this_frame->coded_error) >
ERR_CHANGE_THRESHOLD) ||
(fabs(last_frame->intra_error - this_frame->intra_error) /
- DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
+ DOUBLE_DIVIDE_CHECK(this_frame->intra_error) >
ERR_CHANGE_THRESHOLD) ||
((next_frame->intra_error /
- DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
+ DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) >
II_IMPROVEMENT_THRESHOLD))))) {
int i;
const FIRSTPASS_STATS *start_pos = twopass->stats_in;
@@ -2094,8 +2018,7 @@
double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
- if (next_iiratio > KF_II_MAX)
- next_iiratio = KF_II_MAX;
+ if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX;
// Cumulative effect of decay in prediction quality.
if (local_next_frame.pcnt_inter > 0.85)
@@ -2107,10 +2030,9 @@
boost_score += (decay_accumulator * next_iiratio);
// Test various breakout clauses.
- if ((local_next_frame.pcnt_inter < 0.05) ||
- (next_iiratio < 1.5) ||
- (((local_next_frame.pcnt_inter -
- local_next_frame.pcnt_neutral) < 0.20) &&
+ if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) ||
+ (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) <
+ 0.20) &&
(next_iiratio < 3.0)) ||
((boost_score - old_boost_score) < 3.0) ||
(local_next_frame.intra_error < 200)) {
@@ -2120,8 +2042,7 @@
old_boost_score = boost_score;
// Get the next frame details
- if (EOF == input_stats(twopass, &local_next_frame))
- break;
+ if (EOF == input_stats(twopass, &local_next_frame)) break;
}
// If there is tolerable prediction for at least the next 3 frames then
@@ -2157,7 +2078,7 @@
double boost_score = 0.0;
double kf_mod_err = 0.0;
double kf_group_err = 0.0;
- double recent_loop_decay[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+ double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
vp10_zero(next_frame);
@@ -2212,8 +2133,7 @@
// quality since the last GF or KF.
recent_loop_decay[i % 8] = loop_decay_rate;
decay_accumulator = 1.0;
- for (j = 0; j < 8; ++j)
- decay_accumulator *= recent_loop_decay[j];
+ for (j = 0; j < 8; ++j) decay_accumulator *= recent_loop_decay[j];
// Special check for transition or high motion followed by a
// static scene.
@@ -2226,8 +2146,7 @@
// If we don't have a real key frame within the next two
// key_freq intervals then break out of the loop.
- if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq)
- break;
+ if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) break;
} else {
++rc->frames_to_key;
}
@@ -2238,8 +2157,7 @@
  // We already break out of the loop above at 2x max.
// This code centers the extra kf if the actual natural interval
// is between 1x and 2x.
- if (cpi->oxcf.auto_key &&
- rc->frames_to_key > cpi->oxcf.key_freq) {
+ if (cpi->oxcf.auto_key && rc->frames_to_key > cpi->oxcf.key_freq) {
FIRSTPASS_STATS tmp_frame = first_frame;
rc->frames_to_key /= 2;
@@ -2278,8 +2196,8 @@
// Default allocation based on bits left and relative
// complexity of the section.
- twopass->kf_group_bits = (int64_t)(twopass->bits_left *
- (kf_group_err / twopass->modified_error_left));
+ twopass->kf_group_bits = (int64_t)(
+ twopass->bits_left * (kf_group_err / twopass->modified_error_left));
// Clip based on maximum per frame rate defined by the user.
max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key;
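The allocation above gives the key-frame group a slice of the remaining budget proportional to its slice of the remaining modified error, and max_grp_bits then caps it at the user's per-frame maximum times the group length. A minimal sketch of that arithmetic, with a helper name of our own:

#include <stdint.h>

/* Sketch: proportional group budget with a per-frame-rate ceiling. */
static int64_t sketch_kf_group_bits(int64_t bits_left, double kf_group_err,
                                    double modified_error_left,
                                    int64_t max_bits, int frames_to_key) {
  int64_t bits = (int64_t)(bits_left * (kf_group_err / modified_error_left));
  const int64_t max_grp_bits = max_bits * (int64_t)frames_to_key;
  return bits < max_grp_bits ? bits : max_grp_bits;
}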
@@ -2298,23 +2216,22 @@
decay_accumulator = 1.0;
boost_score = 0.0;
for (i = 0; i < (rc->frames_to_key - 1); ++i) {
- if (EOF == input_stats(twopass, &next_frame))
- break;
+ if (EOF == input_stats(twopass, &next_frame)) break;
// Monitor for static sections.
- zero_motion_accumulator = VPXMIN(
- zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame));
+ zero_motion_accumulator = VPXMIN(zero_motion_accumulator,
+ get_zero_motion_factor(cpi, &next_frame));
// Not all frames in the group are necessarily used in calculating boost.
if ((i <= rc->max_gf_interval) ||
((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) {
const double frame_boost =
- calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST);
+ calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST);
// How fast is prediction quality decaying.
if (!detect_flash(twopass, 0)) {
const double loop_decay_rate =
- get_prediction_decay_rate(cpi, &next_frame);
+ get_prediction_decay_rate(cpi, &next_frame);
decay_accumulator *= loop_decay_rate;
decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR);
av_decay_accumulator += decay_accumulator;
@@ -2331,9 +2248,8 @@
twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0);
// Calculate a section intra ratio used in setting max loop filter.
- twopass->section_intra_rating =
- calculate_section_intra_ratio(start_position, twopass->stats_in_end,
- rc->frames_to_key);
+ twopass->section_intra_rating = calculate_section_intra_ratio(
+ start_position, twopass->stats_in_end, rc->frames_to_key);
// Apply various clamps for min and max boost
rc->kf_boost = (int)(av_decay_accumulator * boost_score);
@@ -2341,15 +2257,15 @@
rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST);
// Work out how many bits to allocate for the key frame itself.
- kf_bits = calculate_boost_bits((rc->frames_to_key - 1),
- rc->kf_boost, twopass->kf_group_bits);
+ kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost,
+ twopass->kf_group_bits);
// Work out the fraction of the kf group bits reserved for the inter frames
// within the group after discounting the bits for the kf itself.
if (twopass->kf_group_bits) {
twopass->kfgroup_inter_fraction =
- (double)(twopass->kf_group_bits - kf_bits) /
- (double)twopass->kf_group_bits;
+ (double)(twopass->kf_group_bits - kf_bits) /
+ (double)twopass->kf_group_bits;
} else {
twopass->kfgroup_inter_fraction = 1.0;
}
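For a concrete feel: with twopass->kf_group_bits at 1,000,000 and kf_bits at 200,000, the key frame takes 20% of the group budget and kfgroup_inter_fraction comes out at 0.8; the else branch avoids a division by zero for an empty group budget by defaulting the fraction to 1.0.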
@@ -2407,9 +2323,7 @@
cpi->refresh_golden_frame = 0;
cpi->refresh_alt_ref_frame = 1;
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
@@ -2421,13 +2335,15 @@
const TWO_PASS *const twopass = &cpi->twopass;
return (!frame_is_intra_only(&cpi->common) &&
- twopass->stats_in - 2 > twopass->stats_in_start &&
- twopass->stats_in < twopass->stats_in_end &&
- (twopass->stats_in - 1)->pcnt_inter - (twopass->stats_in - 1)->pcnt_motion
- == 1 &&
- (twopass->stats_in - 2)->pcnt_inter - (twopass->stats_in - 2)->pcnt_motion
- == 1 &&
- twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
+ twopass->stats_in - 2 > twopass->stats_in_start &&
+ twopass->stats_in < twopass->stats_in_end &&
+ (twopass->stats_in - 1)->pcnt_inter -
+ (twopass->stats_in - 1)->pcnt_motion ==
+ 1 &&
+ (twopass->stats_in - 2)->pcnt_inter -
+ (twopass->stats_in - 2)->pcnt_motion ==
+ 1 &&
+ twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
}
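The chained test above leans on one identity: pcnt_inter and pcnt_motion are fractions of the frame, and pcnt_motion (blocks coded with a non-zero motion vector) can never exceed pcnt_inter, so pcnt_inter - pcnt_motion == 1 holds only when the entire frame is inter coded with zero motion. The per-frame part of the check as a hedged one-liner:

/* Sketch: a first-pass frame is completely static when all of it is
 * inter coded and none of it moves. */
static int sketch_frame_is_static(const FIRSTPASS_STATS *s) {
  return s->pcnt_inter - s->pcnt_motion == 1;
}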
void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
@@ -2440,11 +2356,9 @@
int target_rate;
- frames_left = (int)(twopass->total_stats.count -
- cm->current_video_frame);
+ frames_left = (int)(twopass->total_stats.count - cm->current_video_frame);
- if (!twopass->stats_in)
- return;
+ if (!twopass->stats_in) return;
  // If this is an arf frame then we don't want to read the stats file or
// advance the input pointer as we already have what we need.
@@ -2472,20 +2386,19 @@
twopass->active_worst_quality = cpi->oxcf.cq_level;
} else if (cm->current_video_frame == 0) {
// Special case code for first frame.
- const int section_target_bandwidth = (int)(twopass->bits_left /
- frames_left);
+ const int section_target_bandwidth =
+ (int)(twopass->bits_left / frames_left);
const double section_length = twopass->total_left_stats.count;
const double section_error =
- twopass->total_left_stats.coded_error / section_length;
+ twopass->total_left_stats.coded_error / section_length;
const double section_intra_skip =
- twopass->total_left_stats.intra_skip_pct / section_length;
+ twopass->total_left_stats.intra_skip_pct / section_length;
const double section_inactive_zone =
- (twopass->total_left_stats.inactive_zone_rows * 2) /
- ((double)cm->mb_rows * section_length);
- const int tmp_q =
- get_twopass_worst_quality(cpi, section_error,
- section_intra_skip + section_inactive_zone,
- section_target_bandwidth, DEFAULT_GRP_WEIGHT);
+ (twopass->total_left_stats.inactive_zone_rows * 2) /
+ ((double)cm->mb_rows * section_length);
+ const int tmp_q = get_twopass_worst_quality(
+ cpi, section_error, section_intra_skip + section_inactive_zone,
+ section_target_bandwidth, DEFAULT_GRP_WEIGHT);
twopass->active_worst_quality = tmp_q;
twopass->baseline_active_worst_quality = tmp_q;
@@ -2497,8 +2410,7 @@
rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
}
vp10_zero(this_frame);
- if (EOF == input_stats(twopass, &this_frame))
- return;
+ if (EOF == input_stats(twopass, &this_frame)) return;
// Set the frame content type flag.
if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH)
@@ -2528,9 +2440,9 @@
FILE *fpfile;
fpfile = fopen("arf.stt", "a");
++arf_count;
- fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n",
- cm->current_video_frame, rc->frames_till_gf_update_due,
- rc->kf_boost, arf_count, rc->gfu_boost);
+ fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", cm->current_video_frame,
+ rc->frames_till_gf_update_due, rc->kf_boost, arf_count,
+ rc->gfu_boost);
fclose(fpfile);
}
@@ -2555,11 +2467,12 @@
{
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
- ? cpi->initial_mbs : cpi->common.MBs;
+ ? cpi->initial_mbs
+ : cpi->common.MBs;
// The multiplication by 256 reverses a scaling factor of (>> 8)
// applied when combining MB error values for the frame.
twopass->mb_av_energy =
- log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
+ log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0);
}
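Put differently: per-MB errors were summed at 1/256 scale (the >> 8 mentioned in the comment), so the * 256.0 restores the raw per-MB error scale before dividing by num_mbs, and the + 1.0 keeps log() finite for a frame with zero intra error.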
// Update the total stats remaining structure.
@@ -2585,7 +2498,7 @@
// Calculate the pct rc error.
if (rc->total_actual_bits) {
rc->rate_error_estimate =
- (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
+ (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits);
rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100);
} else {
rc->rate_error_estimate = 0;
@@ -2605,7 +2518,7 @@
(cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) &&
!cpi->rc.is_src_frame_alt_ref) {
const int maxq_adj_limit =
- rc->worst_quality - twopass->active_worst_quality;
+ rc->worst_quality - twopass->active_worst_quality;
const int minq_adj_limit =
(cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT);
@@ -2614,7 +2527,7 @@
--twopass->extend_maxq;
if (rc->rolling_target_bits >= rc->rolling_actual_bits)
++twopass->extend_minq;
- // Overshoot.
+ // Overshoot.
} else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) {
--twopass->extend_minq;
if (rc->rolling_target_bits < rc->rolling_actual_bits)
@@ -2643,14 +2556,14 @@
int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO;
if (rc->projected_frame_size < fast_extra_thresh) {
rc->vbr_bits_off_target_fast +=
- fast_extra_thresh - rc->projected_frame_size;
+ fast_extra_thresh - rc->projected_frame_size;
rc->vbr_bits_off_target_fast =
- VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
+ VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth));
// Fast adaptation of minQ if necessary to use up the extra bits.
if (rc->avg_frame_bandwidth) {
twopass->extend_minq_fast =
- (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
+ (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth);
}
twopass->extend_minq_fast = VPXMIN(
twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq);
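Worked through with illustrative numbers: if the base frame target were 80,000 bits and HIGH_UNDERSHOOT_RATIO were 8, fast_extra_thresh would be 10,000; a frame that actually codes at 4,000 bits adds 6,000 to vbr_bits_off_target_fast, the pool is capped at four average frames' worth of bits, and extend_minq_fast converts the pool into a minQ extension of roughly pool * 8 / avg_frame_bandwidth, itself capped so the combined minQ adjustment stays within minq_adj_limit.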
diff --git a/vp10/encoder/firstpass.h b/vp10/encoder/firstpass.h
index 68a8887..2fbd3f7 100644
--- a/vp10/encoder/firstpass.h
+++ b/vp10/encoder/firstpass.h
@@ -155,9 +155,8 @@
void vp10_init_subsampling(struct VP10_COMP *cpi);
-void vp10_calculate_coded_size(struct VP10_COMP *cpi,
- int *scaled_frame_width,
- int *scaled_frame_height);
+void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
+ int *scaled_frame_height);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp10/encoder/lookahead.c b/vp10/encoder/lookahead.c
index 6289fbc..19beee3 100644
--- a/vp10/encoder/lookahead.c
+++ b/vp10/encoder/lookahead.c
@@ -25,35 +25,31 @@
struct lookahead_entry *buf = ctx->buf + index;
assert(index < ctx->max_sz);
- if (++index >= ctx->max_sz)
- index -= ctx->max_sz;
+ if (++index >= ctx->max_sz) index -= ctx->max_sz;
*idx = index;
return buf;
}
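The pop helper above treats the queue as a ring buffer: because the index only ever advances one slot at a time, wrap-around needs a single conditional subtraction rather than a modulo. The idiom in isolation:

/* Ring-buffer step: advance an index and wrap without a division. */
static unsigned int ring_advance(unsigned int index, unsigned int max_sz) {
  if (++index >= max_sz) index -= max_sz;
  return index;
}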
-
void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx) {
if (ctx->buf) {
unsigned int i;
- for (i = 0; i < ctx->max_sz; i++)
- vpx_free_frame_buffer(&ctx->buf[i].img);
+ for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img);
free(ctx->buf);
}
free(ctx);
}
}
-
struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
- unsigned int height,
- unsigned int subsampling_x,
- unsigned int subsampling_y,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
- int use_highbitdepth,
+ int use_highbitdepth,
#endif
- unsigned int depth) {
+ unsigned int depth) {
struct lookahead_ctx *ctx = NULL;
// Clamp the lookahead queue depth
@@ -69,32 +65,30 @@
unsigned int i;
ctx->max_sz = depth;
ctx->buf = calloc(depth, sizeof(*ctx->buf));
- if (!ctx->buf)
- goto bail;
+ if (!ctx->buf) goto bail;
for (i = 0; i < depth; i++)
- if (vpx_alloc_frame_buffer(&ctx->buf[i].img,
- width, height, subsampling_x, subsampling_y,
+ if (vpx_alloc_frame_buffer(
+ &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
- use_highbitdepth,
+ use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS,
- legacy_byte_alignment))
+ VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
goto bail;
}
return ctx;
- bail:
+bail:
vp10_lookahead_destroy(ctx);
return NULL;
}
#define USE_PARTIAL_COPY 0
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end,
+int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end,
#if CONFIG_VPX_HIGHBITDEPTH
- int use_highbitdepth,
+ int use_highbitdepth,
#endif
- unsigned int flags) {
+ unsigned int flags) {
struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
int row, col, active_end;
@@ -109,8 +103,7 @@
int subsampling_y = src->subsampling_y;
int larger_dimensions, new_dimensions;
- if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz)
- return 1;
+ if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) return 1;
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
@@ -118,8 +111,7 @@
height != buf->img.y_crop_height ||
uv_width != buf->img.uv_crop_width ||
uv_height != buf->img.uv_crop_height;
- larger_dimensions = width > buf->img.y_width ||
- height > buf->img.y_height ||
+ larger_dimensions = width > buf->img.y_width || height > buf->img.y_height ||
uv_width > buf->img.uv_width ||
uv_height > buf->img.uv_height;
assert(!larger_dimensions || new_dimensions);
@@ -139,27 +131,22 @@
while (1) {
// Find the first active macroblock in this row.
for (; col < mb_cols; ++col) {
- if (active_map[col])
- break;
+ if (active_map[col]) break;
}
      // No more active macroblocks in this row.
- if (col == mb_cols)
- break;
+ if (col == mb_cols) break;
      // Find the end of the active region in this row.
active_end = col;
for (; active_end < mb_cols; ++active_end) {
- if (!active_map[active_end])
- break;
+ if (!active_map[active_end]) break;
}
// Only copy this active region.
- vp10_copy_and_extend_frame_with_rect(src, &buf->img,
- row << 4,
- col << 4, 16,
- (active_end - col) << 4);
+ vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+ 16, (active_end - col) << 4);
// Start again from the end of this active region.
col = active_end;
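The partial-copy loop above is a run scanner over one row of the active map: skip inactive macroblocks, measure the active run, copy only that span, resume after it. The same control flow as a self-contained sketch (the callback is ours):

#include <stdint.h>

/* Visit each contiguous run of active macroblocks in a map row. */
static void for_each_active_run(const uint8_t *active_map, int mb_cols,
                                void (*visit)(int col, int width)) {
  int col = 0;
  while (col < mb_cols) {
    while (col < mb_cols && !active_map[col]) ++col; /* skip inactive */
    if (col == mb_cols) break;                       /* row exhausted */
    {
      int run_end = col;
      while (run_end < mb_cols && active_map[run_end]) ++run_end;
      visit(col, run_end - col); /* handle this run only */
      col = run_end;             /* start again after it */
    }
  }
}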
@@ -172,14 +159,13 @@
if (larger_dimensions) {
YV12_BUFFER_CONFIG new_img;
memset(&new_img, 0, sizeof(new_img));
- if (vpx_alloc_frame_buffer(&new_img,
- width, height, subsampling_x, subsampling_y,
+ if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x,
+ subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
use_highbitdepth,
#endif
- VPX_ENC_BORDER_IN_PIXELS,
- 0))
- return 1;
+ VPX_ENC_BORDER_IN_PIXELS, 0))
+ return 1;
vpx_free_frame_buffer(&buf->img);
buf->img = new_img;
} else if (new_dimensions) {
@@ -202,9 +188,8 @@
return 0;
}
-
struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
- int drain) {
+ int drain) {
struct lookahead_entry *buf = NULL;
if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
@@ -214,25 +199,22 @@
return buf;
}
-
struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
- int index) {
+ int index) {
struct lookahead_entry *buf = NULL;
if (index >= 0) {
// Forward peek
if (index < (int)ctx->sz) {
index += ctx->read_idx;
- if (index >= (int)ctx->max_sz)
- index -= ctx->max_sz;
+ if (index >= (int)ctx->max_sz) index -= ctx->max_sz;
buf = ctx->buf + index;
}
} else if (index < 0) {
// Backward peek
if (-index <= MAX_PRE_FRAMES) {
index += ctx->read_idx;
- if (index < 0)
- index += ctx->max_sz;
+ if (index < 0) index += ctx->max_sz;
buf = ctx->buf + index;
}
}
@@ -240,6 +222,4 @@
return buf;
}
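Usage sketch for the peek API: a non-negative index peeks forward from the read position, a negative one reaches back into the (at most MAX_PRE_FRAMES of) history, and NULL means no such buffer exists:

/* Assumes ctx is a populated lookahead context. */
static void sketch_peek_usage(struct lookahead_ctx *ctx) {
  struct lookahead_entry *next = vp10_lookahead_peek(ctx, 1);  /* one ahead */
  struct lookahead_entry *prev = vp10_lookahead_peek(ctx, -1); /* one back */
  if (next == NULL || prev == NULL) {
    /* Queue too short, or no history buffered yet. */
  }
}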
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) {
- return ctx->sz;
-}
+unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/vp10/encoder/lookahead.h b/vp10/encoder/lookahead.h
index fb28c32..4b5fe7f 100644
--- a/vp10/encoder/lookahead.h
+++ b/vp10/encoder/lookahead.h
@@ -21,10 +21,10 @@
#define MAX_LAG_BUFFERS 25
struct lookahead_entry {
- YV12_BUFFER_CONFIG img;
- int64_t ts_start;
- int64_t ts_end;
- unsigned int flags;
+ YV12_BUFFER_CONFIG img;
+ int64_t ts_start;
+ int64_t ts_end;
+ unsigned int flags;
};
// The maximum number of past frames we want to keep in the queue.
@@ -44,20 +44,18 @@
* may be done when buffers are enqueued.
*/
struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
- unsigned int height,
- unsigned int subsampling_x,
- unsigned int subsampling_y,
+ unsigned int height,
+ unsigned int subsampling_x,
+ unsigned int subsampling_y,
#if CONFIG_VPX_HIGHBITDEPTH
- int use_highbitdepth,
+ int use_highbitdepth,
#endif
- unsigned int depth);
-
+ unsigned int depth);
/**\brief Destroys the lookahead stage
*/
void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
-
/**\brief Enqueue a source buffer
*
* This function will copy the source image into a new framebuffer with
@@ -74,12 +72,11 @@
* \param[in] active_map Map that specifies which macroblock is active
*/
int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end,
+ int64_t ts_start, int64_t ts_end,
#if CONFIG_VPX_HIGHBITDEPTH
- int use_highbitdepth,
+ int use_highbitdepth,
#endif
- unsigned int flags);
-
+ unsigned int flags);
/**\brief Get the next source buffer to encode
*
@@ -92,8 +89,7 @@
* \retval NULL, if drain not set and queue not of the configured depth
*/
struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
- int drain);
-
+ int drain);
/**\brief Get a future source buffer to encode
*
@@ -103,8 +99,7 @@
* \retval NULL, if no buffer exists at the specified index
*/
struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
- int index);
-
+ int index);
/**\brief Get the number of frames currently in the lookahead queue
*
diff --git a/vp10/encoder/mbgraph.c b/vp10/encoder/mbgraph.c
index 98c532e..1579bf4 100644
--- a/vp10/encoder/mbgraph.c
+++ b/vp10/encoder/mbgraph.c
@@ -22,11 +22,8 @@
#include "vp10/common/reconinter.h"
#include "vp10/common/reconintra.h"
-
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
- const MV *ref_mv,
- MV *dst_mv,
- int mb_row,
+static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+ MV *dst_mv, int mb_row,
int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -51,8 +48,7 @@
/*cpi->sf.search_method == HEX*/
vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
- cond_cost_list(cpi, cost_list),
- &v_fn_ptr, 0, ref_mv, dst_mv);
+ cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv, dst_mv);
// Try sub-pixel MC
// if (bestsme > error_thresh && bestsme < INT_MAX)
@@ -62,9 +58,8 @@
cpi->find_fractional_mv_step(
x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- NULL, NULL,
- &distortion, &sse, NULL, 0, 0);
+ cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0,
+ 0);
}
xd->mi[0]->mbmi.mode = NEWMV;
@@ -107,10 +102,10 @@
// based search as well.
if (ref_mv->row != 0 || ref_mv->col != 0) {
unsigned int tmp_err;
- MV zero_ref_mv = {0, 0}, tmp_mv;
+ MV zero_ref_mv = { 0, 0 }, tmp_mv;
- tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
- mb_row, mb_col);
+ tmp_err =
+ do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, mb_row, mb_col);
if (tmp_err < err) {
dst_mv->as_mv = tmp_mv;
err = tmp_err;
@@ -135,7 +130,7 @@
return err;
}
static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
- MACROBLOCK *const x = &cpi->td.mb;
+ MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
PREDICTION_MODE best_mode = -1, mode;
unsigned int best_err = INT_MAX;
@@ -146,38 +141,30 @@
unsigned int err;
xd->mi[0]->mbmi.mode = mode;
- vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode,
- x->plane[0].src.buf, x->plane[0].src.stride,
- xd->plane[0].dst.buf, xd->plane[0].dst.stride,
- 0, 0, 0);
+ vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+ x->plane[0].src.stride, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, 0, 0, 0);
err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
// find best
if (err < best_err) {
- best_err = err;
+ best_err = err;
best_mode = mode;
}
}
- if (pbest_mode)
- *pbest_mode = best_mode;
+ if (pbest_mode) *pbest_mode = best_mode;
return best_err;
}
-static void update_mbgraph_mb_stats
-(
- VP10_COMP *cpi,
- MBGRAPH_MB_STATS *stats,
- YV12_BUFFER_CONFIG *buf,
- int mb_y_offset,
- YV12_BUFFER_CONFIG *golden_ref,
- const MV *prev_golden_ref_mv,
- YV12_BUFFER_CONFIG *alt_ref,
- int mb_row,
- int mb_col
-) {
+static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+ YV12_BUFFER_CONFIG *buf, int mb_y_offset,
+ YV12_BUFFER_CONFIG *golden_ref,
+ const MV *prev_golden_ref_mv,
+ YV12_BUFFER_CONFIG *alt_ref, int mb_row,
+ int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error;
@@ -191,10 +178,8 @@
xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride;
// do intra 16x16 prediction
- intra_error = find_best_16x16_intra(cpi,
- &stats->ref[INTRA_FRAME].m.mode);
- if (intra_error <= 0)
- intra_error = 1;
+ intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode);
+ if (intra_error <= 0) intra_error = 1;
stats->ref[INTRA_FRAME].err = intra_error;
  // Golden frame MV search, if it exists and is different from the last frame
@@ -202,10 +187,9 @@
int g_motion_error;
xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = golden_ref->y_stride;
- g_motion_error = do_16x16_motion_search(cpi,
- prev_golden_ref_mv,
- &stats->ref[GOLDEN_FRAME].m.mv,
- mb_row, mb_col);
+ g_motion_error =
+ do_16x16_motion_search(cpi, prev_golden_ref_mv,
+ &stats->ref[GOLDEN_FRAME].m.mv, mb_row, mb_col);
stats->ref[GOLDEN_FRAME].err = g_motion_error;
} else {
stats->ref[GOLDEN_FRAME].err = INT_MAX;
@@ -218,8 +202,8 @@
int a_motion_error;
xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = alt_ref->y_stride;
- a_motion_error = do_16x16_zerozero_search(cpi,
- &stats->ref[ALTREF_FRAME].m.mv);
+ a_motion_error =
+ do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv);
stats->ref[ALTREF_FRAME].err = a_motion_error;
} else {
@@ -239,17 +223,17 @@
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
- MV gld_top_mv = {0, 0};
+ MV gld_top_mv = { 0, 0 };
MODE_INFO mi_local;
vp10_zero(mi_local);
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
- x->mv_row_min = -BORDER_MV_PIXELS_B16;
- x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
- xd->up_available = 0;
- xd->plane[0].dst.stride = buf->y_stride;
- xd->plane[0].pre[0].stride = buf->y_stride;
+ x->mv_row_min = -BORDER_MV_PIXELS_B16;
+ x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
+ xd->up_available = 0;
+ xd->plane[0].dst.stride = buf->y_stride;
+ xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
xd->mi[0] = &mi_local;
mi_local.mbmi.sb_type = BLOCK_16X16;
@@ -258,41 +242,39 @@
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
MV gld_left_mv = gld_top_mv;
- int mb_y_in_offset = mb_y_offset;
+ int mb_y_in_offset = mb_y_offset;
int arf_y_in_offset = arf_y_offset;
int gld_y_in_offset = gld_y_offset;
// Set up limit values for motion vectors to prevent them extending outside
// the UMV borders.
- x->mv_col_min = -BORDER_MV_PIXELS_B16;
- x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
+ x->mv_col_min = -BORDER_MV_PIXELS_B16;
+ x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
xd->left_available = 0;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
- update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
- golden_ref, &gld_left_mv, alt_ref,
- mb_row, mb_col);
+ update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref,
+ &gld_left_mv, alt_ref, mb_row, mb_col);
gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
if (mb_col == 0) {
gld_top_mv = gld_left_mv;
}
xd->left_available = 1;
- mb_y_in_offset += 16;
- gld_y_in_offset += 16;
- arf_y_in_offset += 16;
- x->mv_col_min -= 16;
- x->mv_col_max -= 16;
+ mb_y_in_offset += 16;
+ gld_y_in_offset += 16;
+ arf_y_in_offset += 16;
+ x->mv_col_min -= 16;
+ x->mv_col_max -= 16;
}
xd->up_available = 1;
- mb_y_offset += buf->y_stride * 16;
- gld_y_offset += golden_ref->y_stride * 16;
- if (alt_ref)
- arf_y_offset += alt_ref->y_stride * 16;
- x->mv_row_min -= 16;
- x->mv_row_max -= 16;
- offset += cm->mb_cols;
+ mb_y_offset += buf->y_stride * 16;
+ gld_y_offset += golden_ref->y_stride * 16;
+ if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
+ x->mv_row_min -= 16;
+ x->mv_row_max -= 16;
+ offset += cm->mb_cols;
}
}
@@ -306,9 +288,9 @@
int *arf_not_zz;
- CHECK_MEM_ERROR(cm, arf_not_zz,
- vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz),
- 1));
+ CHECK_MEM_ERROR(
+ cm, arf_not_zz,
+ vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
// We are not interested in results beyond the alt ref itself.
if (n_frames > cpi->rc.frames_till_gf_update_due)
@@ -324,12 +306,11 @@
MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col];
int altref_err = mb_stats->ref[ALTREF_FRAME].err;
- int intra_err = mb_stats->ref[INTRA_FRAME ].err;
+ int intra_err = mb_stats->ref[INTRA_FRAME].err;
int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
// Test for altref vs intra and gf and that its mv was 0,0.
- if (altref_err > 1000 ||
- altref_err > intra_err ||
+ if (altref_err > 1000 || altref_err > intra_err ||
altref_err > golden_err) {
arf_not_zz[offset + mb_col]++;
}
@@ -384,11 +365,9 @@
  // we need to look ahead beyond the point where the ARF transitions into
  // being a GF - so exit if the lookahead does not extend that far
- if (n_frames <= cpi->rc.frames_till_gf_update_due)
- return;
+ if (n_frames <= cpi->rc.frames_till_gf_update_due) return;
- if (n_frames > MAX_LAG_BUFFERS)
- n_frames = MAX_LAG_BUFFERS;
+ if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS;
cpi->mbgraph_n_frames = n_frames;
for (i = 0; i < n_frames; i++) {
@@ -407,8 +386,8 @@
assert(q_cur != NULL);
- update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
- golden_ref, cpi->Source);
+ update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref,
+ cpi->Source);
}
vpx_clear_system_state();
diff --git a/vp10/encoder/mbgraph.h b/vp10/encoder/mbgraph.h
index 3408464..e2daae5 100644
--- a/vp10/encoder/mbgraph.h
+++ b/vp10/encoder/mbgraph.h
@@ -25,9 +25,7 @@
} ref[MAX_REF_FRAMES];
} MBGRAPH_MB_STATS;
-typedef struct {
- MBGRAPH_MB_STATS *mb_stats;
-} MBGRAPH_FRAME_STATS;
+typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
struct VP10_COMP;
diff --git a/vp10/encoder/mcomp.c b/vp10/encoder/mcomp.c
index 8d5b662..17cc2f3 100644
--- a/vp10/encoder/mcomp.c
+++ b/vp10/encoder/mcomp.c
@@ -45,14 +45,10 @@
// Get intersection of UMV window and valid MV window to reduce # of checks
// in diamond search.
- if (x->mv_col_min < col_min)
- x->mv_col_min = col_min;
- if (x->mv_col_max > col_max)
- x->mv_col_max = col_max;
- if (x->mv_row_min < row_min)
- x->mv_row_min = row_min;
- if (x->mv_row_max > row_max)
- x->mv_row_max = row_max;
+ if (x->mv_col_min < col_min) x->mv_col_min = col_min;
+ if (x->mv_col_max > col_max) x->mv_col_max = col_max;
+ if (x->mv_row_min < row_min) x->mv_row_min = row_min;
+ if (x->mv_row_max > row_max) x->mv_row_max = row_max;
}
int vp10_init_search_range(int size) {
@@ -60,44 +56,39 @@
// Minimum search size no matter what the passed in value.
size = VPXMAX(16, size);
- while ((size << sr) < MAX_FULL_PEL_VAL)
- sr++;
+ while ((size << sr) < MAX_FULL_PEL_VAL) sr++;
sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2);
return sr;
}
-static INLINE int mv_cost(const MV *mv,
- const int *joint_cost, int *const comp_cost[2]) {
- return joint_cost[vp10_get_mv_joint(mv)] +
- comp_cost[0][mv->row] + comp_cost[1][mv->col];
+static INLINE int mv_cost(const MV *mv, const int *joint_cost,
+ int *const comp_cost[2]) {
+ return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+ comp_cost[1][mv->col];
}
-int vp10_mv_bit_cost(const MV *mv, const MV *ref,
- const int *mvjcost, int *mvcost[2], int weight) {
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight) {
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
}
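ROUND_POWER_OF_TWO(value, n) expands to ((value) + (1 << ((n)-1))) >> (n), i.e. round-to-nearest division by 2^n, so the weighted rate estimate above is divided by 128 with rounding rather than truncation. Quick check: a weighted cost of 16448 gives (16448 + 64) >> 7 = 129, where plain truncation would give 128.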
-static int mv_err_cost(const MV *mv, const MV *ref,
- const int *mvjcost, int *mvcost[2],
- int error_per_bit) {
+static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int error_per_bit) {
if (mvcost) {
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
- return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) *
- error_per_bit, 13);
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
+ return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
+ 13);
}
return 0;
}
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
int error_per_bit) {
- const MV diff = { mv->row - ref->row,
- mv->col - ref->col };
- return ROUND_POWER_OF_TWO(mv_cost(&diff, x->nmvjointsadcost,
- x->nmvsadcost) * error_per_bit, 8);
+ const MV diff = { mv->row - ref->row, mv->col - ref->col };
+ return ROUND_POWER_OF_TWO(
+ mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * error_per_bit, 8);
}
void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
@@ -108,7 +99,7 @@
for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
// Generate offsets for 4 search sites per step.
- const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}};
+ const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };
int i;
for (i = 0; i < 4; ++i) {
search_site *const ss = &cfg->ss[ss_count++];
@@ -129,10 +120,9 @@
for (len = MAX_FIRST_STEP; len > 0; len /= 2) {
// Generate offsets for 8 search sites per step.
- const MV ss_mvs[8] = {
- {-len, 0 }, {len, 0 }, { 0, -len}, {0, len},
- {-len, -len}, {-len, len}, {len, -len}, {len, len}
- };
+ const MV ss_mvs[8] = { { -len, 0 }, { len, 0 }, { 0, -len },
+ { 0, len }, { -len, -len }, { -len, len },
+ { len, -len }, { len, len } };
int i;
for (i = 0; i < 8; ++i) {
search_site *const ss = &cfg->ss[ss_count++];
@@ -156,172 +146,147 @@
*/
/* estimated cost of a motion vector (r,c) */
-#define MVC(r, c) \
- (mvcost ? \
- ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
- mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
- error_per_bit + 4096) >> 13 : 0)
-
+#define MVC(r, c) \
+ (mvcost \
+ ? ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + mvcost[0][((r)-rr)] + \
+ mvcost[1][((c)-rc)]) * \
+ error_per_bit + \
+ 4096) >> \
+ 13 \
+ : 0)
// convert motion vector component to offset for sv[a]f calc
-static INLINE int sp(int x) {
- return x & 7;
-}
+static INLINE int sp(int x) { return x & 7; }
static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
return &buf[(r >> 3) * stride + (c >> 3)];
}
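Both helpers decode this file's 1/8-pel coordinate convention: r >> 3 and c >> 3 select the full-pel sample in the prediction buffer, while sp() keeps the low three bits, the 1/8-pel fraction passed to the sub-pixel variance functions. For c = 37, for instance, the full-pel column is 37 >> 3 = 4 and the fraction is 37 & 7 = 5.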
/* checks if (r, c) has better score than previous best */
-#define CHECK_BETTER(v, r, c) \
- if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
- if (second_pred == NULL) \
- thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
- src_stride, &sse); \
- else \
- thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
- z, src_stride, &sse, second_pred); \
- if ((v = MVC(r, c) + thismse) < besterr) { \
- besterr = v; \
- br = r; \
- bc = c; \
- *distortion = thismse; \
- *sse1 = sse; \
- } \
- } else { \
- v = INT_MAX; \
+#define CHECK_BETTER(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ if (second_pred == NULL) \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse); \
+ else \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
+ src_stride, &sse, second_pred); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
}
-#define FIRST_LEVEL_CHECKS \
- { \
- unsigned int left, right, up, down, diag; \
- CHECK_BETTER(left, tr, tc - hstep); \
- CHECK_BETTER(right, tr, tc + hstep); \
- CHECK_BETTER(up, tr - hstep, tc); \
- CHECK_BETTER(down, tr + hstep, tc); \
- whichdir = (left < right ? 0 : 1) + \
- (up < down ? 0 : 2); \
- switch (whichdir) { \
- case 0: \
- CHECK_BETTER(diag, tr - hstep, tc - hstep); \
- break; \
- case 1: \
- CHECK_BETTER(diag, tr - hstep, tc + hstep); \
- break; \
- case 2: \
- CHECK_BETTER(diag, tr + hstep, tc - hstep); \
- break; \
- case 3: \
- CHECK_BETTER(diag, tr + hstep, tc + hstep); \
- break; \
- } \
+#define FIRST_LEVEL_CHECKS \
+ { \
+ unsigned int left, right, up, down, diag; \
+ CHECK_BETTER(left, tr, tc - hstep); \
+ CHECK_BETTER(right, tr, tc + hstep); \
+ CHECK_BETTER(up, tr - hstep, tc); \
+ CHECK_BETTER(down, tr + hstep, tc); \
+ whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); \
+ switch (whichdir) { \
+ case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \
+ case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \
+ case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \
+ case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \
+ } \
}
-#define SECOND_LEVEL_CHECKS \
- { \
- int kr, kc; \
- unsigned int second; \
- if (tr != br && tc != bc) { \
- kr = br - tr; \
- kc = bc - tc; \
- CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
- CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
- } else if (tr == br && tc != bc) { \
- kc = bc - tc; \
- CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
- CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
- switch (whichdir) { \
- case 0: \
- case 1: \
- CHECK_BETTER(second, tr + hstep, tc + kc); \
- break; \
- case 2: \
- case 3: \
- CHECK_BETTER(second, tr - hstep, tc + kc); \
- break; \
- } \
- } else if (tr != br && tc == bc) { \
- kr = br - tr; \
- CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
- CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
- switch (whichdir) { \
- case 0: \
- case 2: \
- CHECK_BETTER(second, tr + kr, tc + hstep); \
- break; \
- case 1: \
- case 3: \
- CHECK_BETTER(second, tr + kr, tc - hstep); \
- break; \
- } \
- } \
+#define SECOND_LEVEL_CHECKS \
+ { \
+ int kr, kc; \
+ unsigned int second; \
+ if (tr != br && tc != bc) { \
+ kr = br - tr; \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + kr, tc + 2 * kc); \
+ CHECK_BETTER(second, tr + 2 * kr, tc + kc); \
+ } else if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \
+ CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \
+ switch (whichdir) { \
+ case 0: \
+ case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \
+ case 2: \
+ case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \
+ } \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \
+ CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \
+ switch (whichdir) { \
+ case 0: \
+ case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \
+ case 1: \
+ case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \
+ } \
+ } \
}
// TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST was a rewrite of
// SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten
// later in the same way.
-#define SECOND_LEVEL_CHECKS_BEST \
- { \
- unsigned int second; \
- int br0 = br; \
- int bc0 = bc; \
- assert(tr == br || tc == bc); \
- if (tr == br && tc != bc) { \
- kc = bc - tc; \
- } else if (tr != br && tc == bc) { \
- kr = br - tr; \
- } \
- CHECK_BETTER(second, br0 + kr, bc0); \
- CHECK_BETTER(second, br0, bc0 + kc); \
- if (br0 != br || bc0 != bc) { \
- CHECK_BETTER(second, br0 + kr, bc0 + kc); \
- } \
+#define SECOND_LEVEL_CHECKS_BEST \
+ { \
+ unsigned int second; \
+ int br0 = br; \
+ int bc0 = bc; \
+ assert(tr == br || tc == bc); \
+ if (tr == br && tc != bc) { \
+ kc = bc - tc; \
+ } else if (tr != br && tc == bc) { \
+ kr = br - tr; \
+ } \
+ CHECK_BETTER(second, br0 + kr, bc0); \
+ CHECK_BETTER(second, br0, bc0 + kc); \
+ if (br0 != br || bc0 != bc) { \
+ CHECK_BETTER(second, br0 + kr, bc0 + kc); \
+ } \
}
-#define SETUP_SUBPEL_SEARCH \
- const uint8_t *const z = x->plane[0].src.buf; \
- const int src_stride = x->plane[0].src.stride; \
- const MACROBLOCKD *xd = &x->e_mbd; \
- unsigned int besterr = INT_MAX; \
- unsigned int sse; \
- unsigned int whichdir; \
- int thismse; \
- const unsigned int halfiters = iters_per_step; \
- const unsigned int quarteriters = iters_per_step; \
- const unsigned int eighthiters = iters_per_step; \
- const int y_stride = xd->plane[0].pre[0].stride; \
- const int offset = bestmv->row * y_stride + bestmv->col; \
- const uint8_t *const y = xd->plane[0].pre[0].buf; \
- \
- int rr = ref_mv->row; \
- int rc = ref_mv->col; \
- int br = bestmv->row * 8; \
- int bc = bestmv->col * 8; \
- int hstep = 4; \
- const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
- const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
- const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
- const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
- int tr = br; \
- int tc = bc; \
- \
- bestmv->row *= 8; \
+#define SETUP_SUBPEL_SEARCH \
+ const uint8_t *const z = x->plane[0].src.buf; \
+ const int src_stride = x->plane[0].src.stride; \
+ const MACROBLOCKD *xd = &x->e_mbd; \
+ unsigned int besterr = INT_MAX; \
+ unsigned int sse; \
+ unsigned int whichdir; \
+ int thismse; \
+ const unsigned int halfiters = iters_per_step; \
+ const unsigned int quarteriters = iters_per_step; \
+ const unsigned int eighthiters = iters_per_step; \
+ const int y_stride = xd->plane[0].pre[0].stride; \
+ const int offset = bestmv->row * y_stride + bestmv->col; \
+ const uint8_t *const y = xd->plane[0].pre[0].buf; \
+ \
+ int rr = ref_mv->row; \
+ int rc = ref_mv->col; \
+ int br = bestmv->row * 8; \
+ int bc = bestmv->col * 8; \
+ int hstep = 4; \
+ const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \
+ const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \
+ const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \
+ const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \
+ int tr = br; \
+ int tc = bc; \
+ \
+ bestmv->row *= 8; \
bestmv->col *= 8;
-static unsigned int setup_center_error(const MACROBLOCKD *xd,
- const MV *bestmv,
- const MV *ref_mv,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- const uint8_t *const src,
- const int src_stride,
- const uint8_t *const y,
- int y_stride,
- const uint8_t *second_pred,
- int w, int h, int offset,
- int *mvjcost, int *mvcost[2],
- unsigned int *sse1,
- int *distortion) {
+static unsigned int setup_center_error(
+ const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv,
+ int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
+ const uint8_t *const src, const int src_stride, const uint8_t *const y,
+ int y_stride, const uint8_t *second_pred, int w, int h, int offset,
+ int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
unsigned int besterr;
#if CONFIG_VPX_HIGHBITDEPTH
if (second_pred != NULL) {
@@ -329,8 +294,8 @@
DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset,
y_stride);
- besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride,
- sse1);
+ besterr =
+ vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);
} else {
DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -342,7 +307,7 @@
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
#else
- (void) xd;
+ (void)xd;
if (second_pred != NULL) {
DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride);
@@ -361,10 +326,8 @@
}
static INLINE int is_cost_list_wellbehaved(int *cost_list) {
- return cost_list[0] < cost_list[1] &&
- cost_list[0] < cost_list[2] &&
- cost_list[0] < cost_list[3] &&
- cost_list[0] < cost_list[4];
+ return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] &&
+ cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4];
}
// Returns surface minima estimate at given precision in 1/2^n bits.
@@ -375,8 +338,7 @@
// x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0),
// y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0).
// The code below is an integerized version of that.
-static void get_cost_surf_min(int *cost_list, int *ir, int *ic,
- int bits) {
+static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
*ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)),
(cost_list[1] - 2 * cost_list[0] + cost_list[3]));
*ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)),
@@ -384,37 +346,26 @@
}
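The formulas in the comment are the classic 1-D parabolic fit applied per axis: fitting f(x) = a*x^2 + b*x + c through f(-1) = S1, f(0) = S0 and f(1) = S3 gives b = (S3 - S1) / 2 and a = (S1 + S3 - 2*S0) / 2, so the minimum lies at x* = -b / (2*a) = (S1 - S3) / (2 * (S1 + S3 - 2*S0)); the matching expression in S2 and S4 gives the row offset. divide_and_round() evaluates this at 1/2^bits precision in integer arithmetic.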
int vp10_find_best_sub_pixel_tree_pruned_evenmore(
- const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- (void) halfiters;
- (void) quarteriters;
- (void) eighthiters;
- (void) whichdir;
- (void) allow_hp;
- (void) forced_stop;
- (void) hstep;
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ (void)halfiters;
+ (void)quarteriters;
+ (void)eighthiters;
+ (void)whichdir;
+ (void)allow_hp;
+ (void)forced_stop;
+ (void)hstep;
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
- cost_list[4] != INT_MAX &&
- is_cost_list_wellbehaved(cost_list)) {
+ cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
int ir, ic;
unsigned int minpt;
get_cost_surf_min(cost_list, &ir, &ic, 2);
@@ -463,29 +414,19 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+int vp10_find_best_sub_pixel_tree_pruned_more(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
- cost_list[4] != INT_MAX &&
- is_cost_list_wellbehaved(cost_list)) {
+ cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) {
unsigned int minpt;
int ir, ic;
get_cost_surf_min(cost_list, &ir, &ic, 1);
@@ -524,8 +465,8 @@
}
  // These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -537,26 +478,17 @@
return besterr;
}
-int vp10_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+int vp10_find_best_sub_pixel_tree_pruned(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vpx_variance_fn_ptr_t *vfp, int forced_stop,
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+ int h) {
SETUP_SUBPEL_SEARCH;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
- if (cost_list &&
- cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
+ if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX &&
cost_list[2] != INT_MAX && cost_list[3] != INT_MAX &&
cost_list[4] != INT_MAX) {
unsigned int left, right, up, down, diag;
@@ -619,8 +551,8 @@
}
  // These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -633,25 +565,19 @@
}
static const MV search_step_table[12] = {
- // left, right, up, down
- {0, -4}, {0, 4}, {-4, 0}, {4, 0},
- {0, -2}, {0, 2}, {-2, 0}, {2, 0},
- {0, -1}, {0, 1}, {-1, 0}, {1, 0}
+ // left, right, up, down
+ { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 }, { 0, -2 }, { 0, 2 },
+ { -2, 0 }, { 2, 0 }, { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
};
-int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
+ const MV *ref_mv, int allow_hp,
+ int error_per_bit,
+ const vpx_variance_fn_ptr_t *vfp,
+ int forced_stop, int iters_per_step,
+ int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1,
+ const uint8_t *second_pred, int w, int h) {
const uint8_t *const z = x->plane[0].src.buf;
const uint8_t *const src_address = z;
const int src_stride = x->plane[0].src.stride;
@@ -681,18 +607,16 @@
int kr, kc;
if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
- if (round == 3)
- round = 2;
+ if (round == 3) round = 2;
bestmv->row *= 8;
bestmv->col *= 8;
- besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp,
- z, src_stride, y, y_stride, second_pred,
- w, h, offset, mvjcost, mvcost,
- sse1, distortion);
+ besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z,
+ src_stride, y, y_stride, second_pred, w, h,
+ offset, mvjcost, mvcost, sse1, distortion);
- (void) cost_list; // to silence compiler warning
+ (void)cost_list; // to silence compiler warning
for (iter = 0; iter < round; ++iter) {
// Check vertical and horizontal sub-pixel positions.
@@ -705,13 +629,13 @@
this_mv.row = tr;
this_mv.col = tc;
if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse);
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse);
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
src_address, src_stride, &sse, second_pred);
- cost_array[idx] = thismse +
- mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+ cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost,
+ mvcost, error_per_bit);
if (cost_array[idx] < besterr) {
best_idx = idx;
@@ -732,15 +656,15 @@
tr = br + kr;
if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) {
const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3);
- MV this_mv = {tr, tc};
+ MV this_mv = { tr, tc };
if (second_pred == NULL)
- thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse);
+ thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse);
else
- thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
- src_address, src_stride, &sse, second_pred);
- cost_array[4] = thismse +
- mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit);
+ thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address,
+ src_stride, &sse, second_pred);
+ cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost,
+ error_per_bit);
if (cost_array[4] < besterr) {
best_idx = 4;
@@ -760,8 +684,7 @@
bc = tc;
}
- if (iters_per_step > 1 && best_idx != -1)
- SECOND_LEVEL_CHECKS_BEST;
+ if (iters_per_step > 1 && best_idx != -1) SECOND_LEVEL_CHECKS_BEST;
tr = br;
tc = bc;
@@ -776,8 +699,8 @@
  // These lines ensure static analysis doesn't warn that
// tr and tc aren't used after the above point.
- (void) tr;
- (void) tc;
+ (void)tr;
+ (void)tc;
bestmv->row = br;
bestmv->col = bc;
@@ -795,10 +718,8 @@
static INLINE int check_bounds(const MACROBLOCK *x, int row, int col,
int range) {
- return ((row - range) >= x->mv_row_min) &
- ((row + range) <= x->mv_row_max) &
- ((col - range) >= x->mv_col_min) &
- ((col + range) <= x->mv_col_max);
+ return ((row - range) >= x->mv_row_min) & ((row + range) <= x->mv_row_max) &
+ ((col - range) >= x->mv_col_min) & ((col + range) <= x->mv_col_max);
}
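Note the single & rather than &&: each comparison already evaluates to 0 or 1, so the bitwise AND checks all four bounds without short-circuit branches (usually cheaper for a predicate this hot) and returns 1 exactly when the whole (row +/- range, col +/- range) window lies inside the MV limits.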
static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) {
@@ -806,33 +727,31 @@
(mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max);
}
-#define CHECK_BETTER \
- {\
- if (thissad < bestsad) {\
- if (use_mvcost) \
- thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);\
- if (thissad < bestsad) {\
- bestsad = thissad;\
- best_site = i;\
- }\
- }\
+#define CHECK_BETTER \
+ { \
+ if (thissad < bestsad) { \
+ if (use_mvcost) \
+ thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \
+ if (thissad < bestsad) { \
+ bestsad = thissad; \
+ best_site = i; \
+ } \
+ } \
}
-#define MAX_PATTERN_SCALES 11
-#define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
-#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
+#define MAX_PATTERN_SCALES 11
+#define MAX_PATTERN_CANDIDATES 8 // max number of candidates per scale
+#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates
// Calculate and return a sad+mvcost list around an integer best pel.
-static INLINE void calc_int_cost_list(const MACROBLOCK *x,
- const MV *ref_mv,
+static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv,
int sadpb,
const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *best_mv,
- int *cost_list) {
- static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+ const MV *best_mv, int *cost_list) {
+ static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0];
- const MV fcenter_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+ const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
int br = best_mv->row;
int bc = best_mv->col;
MV this_mv;
@@ -841,34 +760,32 @@
this_mv.row = br;
this_mv.col = bc;
- cost_list[0] = fn_ptr->vf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride, &sse) +
+ cost_list[0] =
+ fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv),
+ in_what->stride, &sse) +
mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
if (check_bounds(x, br, bc, 1)) {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv),
in_what->stride, &sse) +
- // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
- mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost,
- x->errorperbit);
+ // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+ mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+ x->mvcost, x->errorperbit);
}
} else {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (!is_mv_in(x, &this_mv))
cost_list[i + 1] = INT_MAX;
else
cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride,
get_buf_from_mv(in_what, &this_mv),
in_what->stride, &sse) +
- // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
- mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost,
- x->errorperbit);
+ // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb);
+ mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost,
+ x->mvcost, x->errorperbit);
}
}
}
@@ -878,19 +795,12 @@
// candidates as indicated in the num_candidates and candidates arrays
// passed into this function
//
-static int vp10_pattern_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv,
- const int num_candidates[MAX_PATTERN_SCALES],
- const MV candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES]) {
+static int vp10_pattern_search(
+ const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+ int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv,
+ const int num_candidates[MAX_PATTERN_SCALES],
+ const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
const MACROBLOCKD *const xd = &x->e_mbd;
static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -902,7 +812,7 @@
int bestsad = INT_MAX;
int thissad;
int k = -1;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
int best_init_s = search_param_to_steps[search_param];
// adjust ref_mv to make sure it is within MV range
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -910,9 +820,9 @@
bc = ref_mv->col;
// Work out the start point for the search
- bestsad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
- mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+ bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
+ mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
  // Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
@@ -924,22 +834,21 @@
int best_site = -1;
if (check_bounds(x, br, bc, 1 << t)) {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -967,22 +876,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1005,22 +913,25 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1053,19 +964,12 @@
// are 4 1-away neighbors, and cost_list is non-null
// TODO(debargha): Merge this function with the one above. Also remove
// use_mvcost option since it is always 1, to save unnecessary branches.
-static int vp10_pattern_search_sad(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv,
- const int num_candidates[MAX_PATTERN_SCALES],
- const MV candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES]) {
+static int vp10_pattern_search_sad(
+ const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
+ int do_init_search, int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv,
+ const int num_candidates[MAX_PATTERN_SCALES],
+ const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) {
const MACROBLOCKD *const xd = &x->e_mbd;
static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = {
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
@@ -1077,7 +981,7 @@
int bestsad = INT_MAX;
int thissad;
int k = -1;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
int best_init_s = search_param_to_steps[search_param];
// adjust ref_mv to make sure it is within MV range
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
@@ -1089,9 +993,9 @@
}
// Work out the start point for the search
- bestsad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
- mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
+ bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
+ mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
// Search all possible scales up to the search param around the center point
// pick the scale of the point that is best as the starting scale of
@@ -1103,22 +1007,21 @@
int best_site = -1;
if (check_bounds(x, br, bc, 1 << t)) {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[t]; i++) {
- const MV this_mv = {br + candidates[t][i].row,
- bc + candidates[t][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[t][i].row,
+ bc + candidates[t][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1146,22 +1049,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1184,22 +1086,25 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ if (!is_mv_in(x, &this_mv)) continue;
+ thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1218,24 +1123,21 @@
if (!do_init_search || s != best_init_s) {
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- cost_list[i + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ cost_list[i + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < num_candidates[s]; i++) {
- const MV this_mv = {br + candidates[s][i].row,
- bc + candidates[s][i].col};
- if (!is_mv_in(x, &this_mv))
- continue;
- cost_list[i + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + candidates[s][i].row,
+ bc + candidates[s][i].col };
+ if (!is_mv_in(x, &this_mv)) continue;
+ cost_list[i + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1258,26 +1160,28 @@
if (check_bounds(x, br, bc, 1 << s)) {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
- cost_list[next_chkpts_indices[i] + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
+ cost_list[next_chkpts_indices[i] + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
} else {
for (i = 0; i < PATTERN_CANDIDATES_REF; i++) {
- const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row,
- bc + candidates[s][next_chkpts_indices[i]].col};
+ const MV this_mv = {
+ br + candidates[s][next_chkpts_indices[i]].row,
+ bc + candidates[s][next_chkpts_indices[i]].col
+ };
if (!is_mv_in(x, &this_mv)) {
cost_list[next_chkpts_indices[i] + 1] = INT_MAX;
continue;
}
- cost_list[next_chkpts_indices[i] + 1] =
- thissad = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ cost_list[next_chkpts_indices[i] + 1] = thissad =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
CHECK_BETTER
}
}
@@ -1298,34 +1202,31 @@
// cost_list[3]: sad at delta { 0, 1} (right) from the best integer pel
// cost_list[4]: sad at delta {-1, 0} (top) from the best integer pel
if (cost_list) {
- static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
+ static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
if (cost_list[0] == INT_MAX) {
cost_list[0] = bestsad;
if (check_bounds(x, br, bc, 1)) {
for (i = 0; i < 4; i++) {
- const MV this_mv = { br + neighbors[i].row,
- bc + neighbors[i].col };
- cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
+ cost_list[i + 1] =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
}
} else {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (!is_mv_in(x, &this_mv))
cost_list[i + 1] = INT_MAX;
else
- cost_list[i + 1] = vfp->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &this_mv),
- in_what->stride);
+ cost_list[i + 1] =
+ vfp->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &this_mv), in_what->stride);
}
}
} else {
if (use_mvcost) {
for (i = 0; i < 4; i++) {
- const MV this_mv = {br + neighbors[i].row,
- bc + neighbors[i].col};
+ const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col };
if (cost_list[i + 1] != INT_MAX) {
cost_list[i + 1] +=
mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1339,183 +1240,320 @@
return bestsad;
}
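
Both pattern-search variants above share one control flow: clamp ref_mv into the legal MV range, price the starting point as SAD plus an MV rate cost, then scan a fixed candidate shape from the coarsest scale down to step 1, re-centring whenever a candidate improves (the CHECK_BETTER macro). Below is a minimal stand-alone sketch of that flow with a synthetic cost function standing in for vfp->sdf() plus mvsad_err_cost(); toy_cost, diamond[], and NUM_SCALES are illustrative names, not libvpx APIs.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int row, col; } ToyMV;

/* Stand-in for SAD + MV rate cost: a cheap convex bowl with its minimum
 * at (5, 9). */
static int toy_cost(ToyMV mv) { return abs(mv.row - 5) + abs(mv.col - 9); }

int main(void) {
  static const ToyMV diamond[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } };
  enum { NUM_SCALES = 4 }; /* real code: up to MAX_PATTERN_SCALES */
  ToyMV best = { 0, 0 };
  int best_cost = toy_cost(best);
  int s, i;

  /* Coarsest scale (step 2^scale) down to step 1; at each scale keep
   * moving to the best improving neighbour until none improves. */
  for (s = NUM_SCALES - 1; s >= 0; --s) {
    const int step = 1 << s;
    for (;;) {
      int best_site = -1;
      for (i = 0; i < 4; ++i) {
        const ToyMV cand = { best.row + diamond[i].row * step,
                             best.col + diamond[i].col * step };
        const int c = toy_cost(cand); /* CHECK_BETTER in the real code */
        if (c < best_cost) {
          best_cost = c;
          best_site = i;
        }
      }
      if (best_site < 0) break;
      best.row += diamond[best_site].row * step;
      best.col += diamond[best_site].col * step;
    }
  }
  printf("best mv = (%d, %d), cost = %d\n", best.row, best.col, best_cost);
  return 0;
}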
-int vp10_get_mvpred_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost) {
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+ int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV mv = {best_mv->row * 8, best_mv->col * 8};
+ const MV mv = { best_mv->row * 8, best_mv->col * 8 };
unsigned int unused;
- return vfp->vf(what->buf, what->stride,
- get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
- x->mvcost, x->errorperbit) : 0);
+ return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv),
+ in_what->stride, &unused) +
+ (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+ x->errorperbit)
+ : 0);
}
-int vp10_get_mvpred_av_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const uint8_t *second_pred,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost) {
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV mv = {best_mv->row * 8, best_mv->col * 8};
+ const MV mv = { best_mv->row * 8, best_mv->col * 8 };
unsigned int unused;
return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0,
what->buf, what->stride, &unused, second_pred) +
- (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
- x->mvcost, x->errorperbit) : 0);
+ (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
+ x->errorperbit)
+ : 0);
}
-int vp10_hex_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv, MV *best_mv) {
+int vp10_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
// First scale has the 8 closest points, the rest have 6 points in hex shape
// at increasing scales
- static const int hex_num_candidates[MAX_PATTERN_SCALES] = {
- 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6
- };
+ static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6 };
// Note that the largest candidate step at each scale is 2^scale
static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
- {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}},
- {{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}},
- {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}},
- {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}},
- {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}},
- {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}},
- {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}},
- {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}},
- {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}},
- {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}},
- {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024},
- { -1024, 0}},
+ { { -1, -1 },
+ { 0, -1 },
+ { 1, -1 },
+ { 1, 0 },
+ { 1, 1 },
+ { 0, 1 },
+ { -1, 1 },
+ { -1, 0 } },
+ { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } },
+ { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } },
+ { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } },
+ { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } },
+ { { -16, -32 },
+ { 16, -32 },
+ { 32, 0 },
+ { 16, 32 },
+ { -16, 32 },
+ { -32, 0 } },
+ { { -32, -64 },
+ { 32, -64 },
+ { 64, 0 },
+ { 32, 64 },
+ { -32, 64 },
+ { -64, 0 } },
+ { { -64, -128 },
+ { 64, -128 },
+ { 128, 0 },
+ { 64, 128 },
+ { -64, 128 },
+ { -128, 0 } },
+ { { -128, -256 },
+ { 128, -256 },
+ { 256, 0 },
+ { 128, 256 },
+ { -128, 256 },
+ { -256, 0 } },
+ { { -256, -512 },
+ { 256, -512 },
+ { 512, 0 },
+ { 256, 512 },
+ { -256, 512 },
+ { -512, 0 } },
+ { { -512, -1024 },
+ { 512, -1024 },
+ { 1024, 0 },
+ { 512, 1024 },
+ { -512, 1024 },
+ { -1024, 0 } },
};
- return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- hex_num_candidates, hex_candidates);
+ return vp10_pattern_search(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
}
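
The note that the largest candidate step at each scale is 2^scale holds because every hex row from scale 1 upward is the scale-1 shape shifted left by one more bit. A small stand-alone check of that property (not libvpx code; base[] copies the scale-1 row of hex_candidates above):

#include <stdio.h>

int main(void) {
  static const int base[6][2] = { { -1, -2 }, { 1, -2 }, { 2, 0 },
                                  { 1, 2 },   { -1, 2 }, { -2, 0 } };
  int s, i;
  for (s = 1; s <= 10; ++s) {
    printf("scale %2d:", s);
    for (i = 0; i < 6; ++i)
      printf(" {%d,%d}", base[i][0] << (s - 1), base[i][1] << (s - 1));
    printf("\n");
  }
  return 0;
}

Printing scale 2, for example, reproduces the { -2, -4 } ... { -4, 0 } row of the table above verbatim.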
-int vp10_bigdia_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+int vp10_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
// First scale has the 4 closest points, the rest have 8 points in diamond
// shape at increasing scales
static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = {
4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
// Note that the largest candidate step at each scale is 2^scale
- static const MV bigdia_candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES] = {
- {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}},
- {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}},
- {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}},
- {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}},
- {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}},
- {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32},
- {-16, 16}, {-32, 0}},
- {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64},
- {-32, 32}, {-64, 0}},
- {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128},
- {-64, 64}, {-128, 0}},
- {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256},
- {-128, 128}, {-256, 0}},
- {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512},
- {-256, 256}, {-512, 0}},
- {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024},
- {-512, 512}, {-1024, 0}},
- };
- return vp10_pattern_search_sad(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- bigdia_num_candidates, bigdia_candidates);
+ static const MV
+ bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+ { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } },
+ { { -1, -1 },
+ { 0, -2 },
+ { 1, -1 },
+ { 2, 0 },
+ { 1, 1 },
+ { 0, 2 },
+ { -1, 1 },
+ { -2, 0 } },
+ { { -2, -2 },
+ { 0, -4 },
+ { 2, -2 },
+ { 4, 0 },
+ { 2, 2 },
+ { 0, 4 },
+ { -2, 2 },
+ { -4, 0 } },
+ { { -4, -4 },
+ { 0, -8 },
+ { 4, -4 },
+ { 8, 0 },
+ { 4, 4 },
+ { 0, 8 },
+ { -4, 4 },
+ { -8, 0 } },
+ { { -8, -8 },
+ { 0, -16 },
+ { 8, -8 },
+ { 16, 0 },
+ { 8, 8 },
+ { 0, 16 },
+ { -8, 8 },
+ { -16, 0 } },
+ { { -16, -16 },
+ { 0, -32 },
+ { 16, -16 },
+ { 32, 0 },
+ { 16, 16 },
+ { 0, 32 },
+ { -16, 16 },
+ { -32, 0 } },
+ { { -32, -32 },
+ { 0, -64 },
+ { 32, -32 },
+ { 64, 0 },
+ { 32, 32 },
+ { 0, 64 },
+ { -32, 32 },
+ { -64, 0 } },
+ { { -64, -64 },
+ { 0, -128 },
+ { 64, -64 },
+ { 128, 0 },
+ { 64, 64 },
+ { 0, 128 },
+ { -64, 64 },
+ { -128, 0 } },
+ { { -128, -128 },
+ { 0, -256 },
+ { 128, -128 },
+ { 256, 0 },
+ { 128, 128 },
+ { 0, 256 },
+ { -128, 128 },
+ { -256, 0 } },
+ { { -256, -256 },
+ { 0, -512 },
+ { 256, -256 },
+ { 512, 0 },
+ { 256, 256 },
+ { 0, 512 },
+ { -256, 256 },
+ { -512, 0 } },
+ { { -512, -512 },
+ { 0, -1024 },
+ { 512, -512 },
+ { 1024, 0 },
+ { 512, 512 },
+ { 0, 1024 },
+ { -512, 512 },
+ { -1024, 0 } },
+ };
+ return vp10_pattern_search_sad(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
}
-int vp10_square_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+int vp10_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
// All scales have 8 closest points in square shape
static const int square_num_candidates[MAX_PATTERN_SCALES] = {
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
// Note that the largest candidate step at each scale is 2^scale
- static const MV square_candidates[MAX_PATTERN_SCALES]
- [MAX_PATTERN_CANDIDATES] = {
- {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}},
- {{-2, -2}, {0, -2}, {2, -2}, {2, 0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}},
- {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}},
- {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}},
- {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16},
- {-16, 16}, {-16, 0}},
- {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32},
- {-32, 32}, {-32, 0}},
- {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64},
- {-64, 64}, {-64, 0}},
- {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128},
- {-128, 128}, {-128, 0}},
- {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256},
- {-256, 256}, {-256, 0}},
- {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512},
- {-512, 512}, {-512, 0}},
- {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024},
- {0, 1024}, {-1024, 1024}, {-1024, 0}},
- };
- return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit,
- do_init_search, cost_list, vfp, use_mvcost,
- center_mv, best_mv,
- square_num_candidates, square_candidates);
+ static const MV
+ square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = {
+ { { -1, -1 },
+ { 0, -1 },
+ { 1, -1 },
+ { 1, 0 },
+ { 1, 1 },
+ { 0, 1 },
+ { -1, 1 },
+ { -1, 0 } },
+ { { -2, -2 },
+ { 0, -2 },
+ { 2, -2 },
+ { 2, 0 },
+ { 2, 2 },
+ { 0, 2 },
+ { -2, 2 },
+ { -2, 0 } },
+ { { -4, -4 },
+ { 0, -4 },
+ { 4, -4 },
+ { 4, 0 },
+ { 4, 4 },
+ { 0, 4 },
+ { -4, 4 },
+ { -4, 0 } },
+ { { -8, -8 },
+ { 0, -8 },
+ { 8, -8 },
+ { 8, 0 },
+ { 8, 8 },
+ { 0, 8 },
+ { -8, 8 },
+ { -8, 0 } },
+ { { -16, -16 },
+ { 0, -16 },
+ { 16, -16 },
+ { 16, 0 },
+ { 16, 16 },
+ { 0, 16 },
+ { -16, 16 },
+ { -16, 0 } },
+ { { -32, -32 },
+ { 0, -32 },
+ { 32, -32 },
+ { 32, 0 },
+ { 32, 32 },
+ { 0, 32 },
+ { -32, 32 },
+ { -32, 0 } },
+ { { -64, -64 },
+ { 0, -64 },
+ { 64, -64 },
+ { 64, 0 },
+ { 64, 64 },
+ { 0, 64 },
+ { -64, 64 },
+ { -64, 0 } },
+ { { -128, -128 },
+ { 0, -128 },
+ { 128, -128 },
+ { 128, 0 },
+ { 128, 128 },
+ { 0, 128 },
+ { -128, 128 },
+ { -128, 0 } },
+ { { -256, -256 },
+ { 0, -256 },
+ { 256, -256 },
+ { 256, 0 },
+ { 256, 256 },
+ { 0, 256 },
+ { -256, 256 },
+ { -256, 0 } },
+ { { -512, -512 },
+ { 0, -512 },
+ { 512, -512 },
+ { 512, 0 },
+ { 512, 512 },
+ { 0, 512 },
+ { -512, 512 },
+ { -512, 0 } },
+ { { -1024, -1024 },
+ { 0, -1024 },
+ { 1024, -1024 },
+ { 1024, 0 },
+ { 1024, 1024 },
+ { 0, 1024 },
+ { -1024, 1024 },
+ { -1024, 0 } },
+ };
+ return vp10_pattern_search(
+ x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
+ use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
}
-int vp10_fast_hex_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search, // must be zero for fast_hex
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+int vp10_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit,
+ int do_init_search, // must be zero for fast_hex
+ int *cost_list, const vpx_variance_fn_ptr_t *vfp,
+ int use_mvcost, const MV *center_mv, MV *best_mv) {
return vp10_hex_search(
x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
}
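
The VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param) clamp is what makes the fast variants fast. Assuming MAX_MVSEARCH_STEPS == 11 (the value implied by the [-1023, 1023] MV-range comment in mcomp.h) and the search_param_to_steps table shown earlier in this diff, any caller-supplied search_param maps to a start step of 1 or 0, so only the one or two smallest pattern scales are ever visited. A worked check, with a local stand-in for the VPXMAX macro:

#include <stdio.h>

#define LOCAL_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
  static const int search_param_to_steps[11] = { 10, 9, 8, 7, 6, 5,
                                                 4,  3, 2, 1, 0 };
  int sp;
  for (sp = 0; sp <= 10; ++sp) {
    const int clamped = LOCAL_MAX(11 - 2, sp); /* MAX_MVSEARCH_STEPS - 2 */
    printf("search_param %2d -> clamped %2d -> start step %d\n", sp, clamped,
           search_param_to_steps[clamped]);
  }
  return 0;
}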
-int vp10_fast_dia_search(const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int sad_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv) {
+int vp10_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+ int sad_per_bit, int do_init_search, int *cost_list,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost,
+ const MV *center_mv, MV *best_mv) {
return vp10_bigdia_search(
x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
@@ -1525,15 +1563,14 @@
// Exhaustive motion search around a given centre position with a given
// step size.
-static int exhuastive_mesh_search(const MACROBLOCK *x,
- MV *ref_mv, MV *best_mv,
+static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
int range, int step, int sad_per_bit,
const vpx_variance_fn_ptr_t *fn_ptr,
const MV *center_mv) {
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- MV fcenter_mv = {center_mv->row, center_mv->col};
+ MV fcenter_mv = { center_mv->row, center_mv->col };
unsigned int best_sad = INT_MAX;
int r, c, i;
int start_col, end_col, start_row, end_row;
@@ -1541,12 +1578,13 @@
assert(step >= 1);
- clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max,
- x->mv_row_min, x->mv_row_max);
+ clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min,
+ x->mv_row_max);
*best_mv = fcenter_mv;
- best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
- mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
+ best_sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) +
+ mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit);
start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row);
start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col);
end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row);
@@ -1556,9 +1594,10 @@
for (c = start_col; c <= end_col; c += col_step) {
// Step > 1 means we are not checking every location in this pass.
if (step > 1) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c};
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c };
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
@@ -1572,17 +1611,16 @@
unsigned int sads[4];
const uint8_t *addrs[4];
for (i = 0; i < 4; ++i) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
addrs[i] = get_buf_from_mv(in_what, &mv);
}
- fn_ptr->sdx4df(what->buf, what->stride, addrs,
- in_what->stride, sads);
+ fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads);
for (i = 0; i < 4; ++i) {
if (sads[i] < best_sad) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
- const unsigned int sad = sads[i] +
- mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+ const unsigned int sad =
+ sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
*best_mv = mv;
@@ -1591,9 +1629,10 @@
}
} else {
for (i = 0; i < end_col - c; ++i) {
- const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i};
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride);
+ const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i };
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &mv), in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit);
if (sad < best_sad) {
@@ -1611,11 +1650,10 @@
}
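
exhuastive_mesh_search above visits every step-th position of a square window up to range positions either side of the (clamped) centre, batching four adjacent columns through sdx4df when the step is 1. A reduced sketch of the visiting pattern alone, with eval() standing in for the SAD-plus-MV-cost test (mesh_scan and print_point are illustrative names, not libvpx functions):

#include <stdio.h>

static void mesh_scan(int range, int step, void (*eval)(int dr, int dc)) {
  int r, c;
  for (r = -range; r <= range; r += step)
    for (c = -range; c <= range; c += step)
      eval(r, c); /* real code: SAD first, MV cost only if SAD improves */
}

static void print_point(int dr, int dc) { printf("(%d,%d) ", dr, dc); }

int main(void) {
  mesh_scan(4, 2, print_point); /* 25 samples of the 81-point window */
  printf("\n");
  return 0;
}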
int vp10_diamond_search_sad_c(const MACROBLOCK *x,
- const search_site_config *cfg,
- MV *ref_mv, MV *best_mv, int search_param,
- int sad_per_bit, int *num00,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv) {
+ const search_site_config *cfg, MV *ref_mv,
+ MV *best_mv, int search_param, int sad_per_bit,
+ int *num00, const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv) {
int i, j, step;
const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1640,7 +1678,7 @@
const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step];
const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param;
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
ref_row = ref_mv->row;
ref_col = ref_mv->col;
@@ -1653,8 +1691,8 @@
best_address = in_what;
// Check the starting position
- bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride)
- + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) +
+ mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit);
i = 1;
@@ -1685,10 +1723,10 @@
for (t = 0; t < 4; t++, i++) {
if (sad_array[t] < bestsad) {
- const MV this_mv = {best_mv->row + ss[i].mv.row,
- best_mv->col + ss[i].mv.col};
- sad_array[t] += mvsad_err_cost(x, &this_mv, &fcenter_mv,
- sad_per_bit);
+ const MV this_mv = { best_mv->row + ss[i].mv.row,
+ best_mv->col + ss[i].mv.col };
+ sad_array[t] +=
+ mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
if (sad_array[t] < bestsad) {
bestsad = sad_array[t];
best_site = i;
@@ -1699,13 +1737,13 @@
} else {
for (j = 0; j < cfg->searches_per_step; j++) {
// Trap illegal vectors
- const MV this_mv = {best_mv->row + ss[i].mv.row,
- best_mv->col + ss[i].mv.col};
+ const MV this_mv = { best_mv->row + ss[i].mv.row,
+ best_mv->col + ss[i].mv.col };
if (is_mv_in(x, &this_mv)) {
const uint8_t *const check_here = ss[i].offset + best_address;
- unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
- in_what_stride);
+ unsigned int thissad =
+ fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
if (thissad < bestsad) {
thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
@@ -1725,12 +1763,12 @@
last_site = best_site;
#if defined(NEW_DIAMOND_SEARCH)
while (1) {
- const MV this_mv = {best_mv->row + ss[best_site].mv.row,
- best_mv->col + ss[best_site].mv.col};
+ const MV this_mv = { best_mv->row + ss[best_site].mv.row,
+ best_mv->col + ss[best_site].mv.col };
if (is_mv_in(x, &this_mv)) {
const uint8_t *const check_here = ss[best_site].offset + best_address;
- unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here,
- in_what_stride);
+ unsigned int thissad =
+ fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
if (thissad < bestsad) {
thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);
if (thissad < bestsad) {
@@ -1770,8 +1808,7 @@
for (d = -8; d <= 8; d += 16) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1783,8 +1820,7 @@
for (d = -4; d <= 4; d += 8) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1796,8 +1832,7 @@
for (d = -2; d <= 2; d += 4) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1809,8 +1844,7 @@
for (d = -1; d <= 1; d += 2) {
int this_pos = offset + d;
// check limit
- if (this_pos < 0 || this_pos > bw)
- continue;
+ if (this_pos < 0 || this_pos > bw) continue;
this_sad = vpx_vector_var(&ref[this_pos], src, bwl);
if (this_sad < best_sad) {
best_sad = this_sad;
@@ -1822,15 +1856,15 @@
}
static const MV search_pos[4] = {
- {-1, 0}, {0, -1}, {0, 1}, {1, 0},
+ { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
};
unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col) {
+ BLOCK_SIZE bsize, int mi_row,
+ int mi_col) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
DECLARE_ALIGNED(16, int16_t, hbuf[128]);
DECLARE_ALIGNED(16, int16_t, vbuf[128]);
DECLARE_ALIGNED(16, int16_t, src_hbuf[64]);
@@ -1855,8 +1889,7 @@
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
+ for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
@@ -1870,8 +1903,7 @@
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
return this_sad;
}
@@ -1912,11 +1944,8 @@
best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
{
- const uint8_t * const pos[4] = {
- ref_buf - ref_stride,
- ref_buf - 1,
- ref_buf + 1,
- ref_buf + ref_stride,
+ const uint8_t *const pos[4] = {
+ ref_buf - ref_stride, ref_buf - 1, ref_buf + 1, ref_buf + ref_stride,
};
cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad);
@@ -1942,8 +1971,7 @@
ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col;
- tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride,
- ref_buf, ref_stride);
+ tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride);
if (best_sad > tmp_sad) {
*tmp_mv = this_mv;
best_sad = tmp_sad;
@@ -1954,8 +1982,7 @@
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
return best_sad;
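
The hbuf/vbuf buffers above hold 1-D integral projections: each column (and row) of the source and reference blocks is summed into a single int16_t, and the coarse motion estimate comes from aligning those short vectors (vpx_vector_var in the real code) rather than running a 2-D SAD over the whole window. A stand-alone sketch of the column half of that idea; project_cols and best_offset are hypothetical helpers, not libvpx functions.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static void project_cols(const uint8_t *buf, int stride, int w, int h,
                         int16_t *proj) {
  int r, c;
  for (c = 0; c < w; ++c) {
    int sum = 0;
    for (r = 0; r < h; ++r) sum += buf[r * stride + c];
    proj[c] = (int16_t)sum; /* 64 rows of 8-bit samples fit in 16 bits */
  }
}

/* Slide the block's projection across the wider reference projection and
 * return the column offset with the smallest 1-D SAD. */
static int best_offset(const int16_t *ref, int ref_w, const int16_t *src,
                       int src_w) {
  int best = 0, best_sad = INT_MAX, off, i;
  for (off = 0; off + src_w <= ref_w; ++off) {
    int sad = 0;
    for (i = 0; i < src_w; ++i) {
      const int d = ref[off + i] - src[i];
      sad += d < 0 ? -d : d;
    }
    if (sad < best_sad) {
      best_sad = sad;
      best = off;
    }
  }
  return best;
}

int main(void) {
  /* An 8-wide "block" hidden at column 3 of a 16-wide, 1-row "reference". */
  static const uint8_t ref_row[16] = { 9, 9, 9, 1, 2, 3, 4, 5,
                                       6, 7, 8, 9, 9, 9, 9, 9 };
  int16_t ref_proj[16], src_proj[8];
  project_cols(ref_row, 16, 16, 1, ref_proj);
  project_cols(ref_row + 3, 16, 8, 1, src_proj);
  printf("best column offset: %d\n", best_offset(ref_proj, 16, src_proj, 8));
  return 0;
}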
@@ -1964,25 +1991,22 @@
/* do_refine: If the last step (1-away) of the n-step search doesn't pick
   the center point as the best match, we will do a final 1-away diamond
   refining search */
-int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x,
- MV *mvp_full, int step_param,
- int sadpb, int further_steps, int do_refine,
- int *cost_list,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv) {
+int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+ int step_param, int sadpb, int further_steps,
+ int do_refine, int *cost_list,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv) {
MV temp_mv;
int thissme, n, num00 = 0;
int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
- step_param, sadpb, &n,
- fn_ptr, ref_mv);
+ step_param, sadpb, &n, fn_ptr, ref_mv);
if (bestsme < INT_MAX)
bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
*dst_mv = temp_mv;
// If there won't be any more n-step searches, check to see if a refining
// search is needed.
- if (n > further_steps)
- do_refine = 0;
+ if (n > further_steps) do_refine = 0;
while (n < further_steps) {
++n;
@@ -1991,14 +2015,13 @@
num00--;
} else {
thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
- step_param + n, sadpb, &num00,
- fn_ptr, ref_mv);
+ step_param + n, sadpb, &num00, fn_ptr,
+ ref_mv);
if (thissme < INT_MAX)
thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
// check to see if refining search is needed.
- if (num00 > further_steps - n)
- do_refine = 0;
+ if (num00 > further_steps - n) do_refine = 0;
if (thissme < bestsme) {
bestsme = thissme;
@@ -2011,8 +2034,8 @@
if (do_refine) {
const int search_range = 8;
MV best_mv = *dst_mv;
- thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range,
- fn_ptr, ref_mv);
+ thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+ ref_mv);
if (thissme < INT_MAX)
thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
if (thissme < bestsme) {
@@ -2034,12 +2057,12 @@
// Runs a limited-range exhaustive mesh search using a pattern set
// according to the encode speed profile.
static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
- MV *centre_mv_full, int sadpb, int *cost_list,
+ MV *centre_mv_full, int sadpb, int *cost_list,
const vpx_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv) {
const SPEED_FEATURES *const sf = &cpi->sf;
- MV temp_mv = {centre_mv_full->row, centre_mv_full->col};
- MV f_ref_mv = {ref_mv->row >> 3, ref_mv->col >> 3};
+ MV temp_mv = { centre_mv_full->row, centre_mv_full->col };
+ MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 };
int bestsme;
int i;
int interval = sf->mesh_patterns[0].interval;
@@ -2050,8 +2073,8 @@
++(*x->ex_search_count_ptr);
// Trap illegal values for interval and range for this function.
- if ((range < MIN_RANGE) || (range > MAX_RANGE) ||
- (interval < MIN_INTERVAL) || (interval > range))
+ if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) ||
+ (interval > range))
return INT_MAX;
baseline_interval_divisor = range / interval;
@@ -2063,21 +2086,19 @@
interval = VPXMAX(interval, range / baseline_interval_divisor);
// initial search
- bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range,
- interval, sadpb, fn_ptr, &temp_mv);
+ bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval,
+ sadpb, fn_ptr, &temp_mv);
if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) {
// Progressive searches with range and step size decreasing each time
// till we reach a step size of 1. Then break out.
for (i = 1; i < MAX_MESH_STEP; ++i) {
// First pass with coarser step and longer range
- bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv,
- sf->mesh_patterns[i].range,
- sf->mesh_patterns[i].interval,
- sadpb, fn_ptr, &temp_mv);
+ bestsme = exhuastive_mesh_search(
+ x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range,
+ sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv);
- if (sf->mesh_patterns[i].interval == 1)
- break;
+ if (sf->mesh_patterns[i].interval == 1) break;
}
}
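
full_pixel_exhaustive thus runs one coarse pass over the speed-profile range, then re-centres on the best MV found so far and repeats with the next, tighter pattern until the interval reaches 1. A sketch of that schedule with made-up { range, interval } pairs; the real values come from sf->mesh_patterns[] and differ per speed profile.

#include <stdio.h>

int main(void) {
  /* Hypothetical pattern values in the spirit of sf->mesh_patterns[]. */
  static const int patterns[][2] = { { 64, 16 }, { 32, 8 }, { 16, 4 },
                                     { 8, 1 } };
  unsigned i;
  for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); ++i) {
    const int range = patterns[i][0], interval = patterns[i][1];
    const int side = 2 * range / interval + 1;
    printf("pass %u: range %2d, step %2d -> %4d points\n", i, range, interval,
           side * side);
    if (interval == 1) break; /* mirrors the loop-exit test above */
  }
  return 0;
}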
@@ -2093,9 +2114,9 @@
}
int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+ int sad_per_bit, int distance,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r, c;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -2104,18 +2125,20 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
for (r = row_min; r < row_max; ++r) {
for (c = col_min; c < col_max; ++c) {
- const MV mv = {r, c};
- const int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride) +
- mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
+ const MV mv = { r, c };
+ const int sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride) +
+ mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
*best_mv = mv;
@@ -2126,9 +2149,9 @@
}
int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+ int sad_per_bit, int distance,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -2137,9 +2160,10 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
@@ -2158,7 +2182,7 @@
for (i = 0; i < 3; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2172,10 +2196,10 @@
}
while (c < col_max) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- check_here, in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2191,9 +2215,9 @@
}
int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
- int sad_per_bit, int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv) {
+ int sad_per_bit, int distance,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv) {
int r;
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
@@ -2202,9 +2226,10 @@
const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max);
const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min);
const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max);
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit);
*best_mv = *ref_mv;
@@ -2223,7 +2248,7 @@
for (i = 0; i < 8; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2247,7 +2272,7 @@
for (i = 0; i < 3; ++i) {
unsigned int sad = sads[i];
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2261,10 +2286,10 @@
}
while (c < col_max) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- check_here, in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride);
if (sad < best_sad) {
- const MV mv = {r, c};
+ const MV mv = { r, c };
sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit);
if (sad < best_sad) {
best_sad = sad;
@@ -2279,19 +2304,18 @@
return best_sad;
}
-int vp10_refining_search_sad(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv) {
+int vp10_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+ int search_range,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv) {
const MACROBLOCKD *const xd = &x->e_mbd;
- const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
+ const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } };
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv);
- unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, best_address,
- in_what->stride) +
+ unsigned int best_sad =
+ fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
int i, j;
@@ -2304,19 +2328,16 @@
if (all_in) {
unsigned int sads[4];
- const uint8_t *const positions[4] = {
- best_address - in_what->stride,
- best_address - 1,
- best_address + 1,
- best_address + in_what->stride
- };
+ const uint8_t *const positions[4] = { best_address - in_what->stride,
+ best_address - 1, best_address + 1,
+ best_address + in_what->stride };
fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads);
for (j = 0; j < 4; ++j) {
if (sads[j] < best_sad) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sads[j] < best_sad) {
best_sad = sads[j];
@@ -2326,13 +2347,13 @@
}
} else {
for (j = 0; j < 4; ++j) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
if (is_mv_in(x, &mv)) {
- unsigned int sad = fn_ptr->sdf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv),
- in_what->stride);
+ unsigned int sad =
+ fn_ptr->sdf(what->buf, what->stride,
+ get_buf_from_mv(in_what, &mv), in_what->stride);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sad < best_sad) {
@@ -2358,20 +2379,19 @@
// This function is called when we do joint motion search in comp_inter_inter
// mode.
-int vp10_refining_search_8p_c(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv,
- const uint8_t *second_pred) {
- const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0},
- {-1, -1}, {1, -1}, {-1, 1}, {1, 1}};
+int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+ int error_per_bit, int search_range,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, const uint8_t *second_pred) {
+ const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
+ { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } };
const MACROBLOCKD *const xd = &x->e_mbd;
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
- const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3};
- unsigned int best_sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_mv(in_what, ref_mv), in_what->stride, second_pred) +
+ const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 };
+ unsigned int best_sad =
+ fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv),
+ in_what->stride, second_pred) +
mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit);
int i, j;
@@ -2379,12 +2399,13 @@
int best_site = -1;
for (j = 0; j < 8; ++j) {
- const MV mv = {ref_mv->row + neighbors[j].row,
- ref_mv->col + neighbors[j].col};
+ const MV mv = { ref_mv->row + neighbors[j].row,
+ ref_mv->col + neighbors[j].col };
if (is_mv_in(x, &mv)) {
- unsigned int sad = fn_ptr->sdaf(what->buf, what->stride,
- get_buf_from_mv(in_what, &mv), in_what->stride, second_pred);
+ unsigned int sad =
+ fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv),
+ in_what->stride, second_pred);
if (sad < best_sad) {
sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit);
if (sad < best_sad) {
@@ -2408,21 +2429,19 @@
#define MIN_EX_SEARCH_LIMIT 128
static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
- const int max_ex = VPXMAX(MIN_EX_SEARCH_LIMIT,
- (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
+ const int max_ex =
+ VPXMAX(MIN_EX_SEARCH_LIMIT,
+ (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100);
return sf->allow_exhaustive_searches &&
- (sf->exhaustive_searches_thresh < INT_MAX) &&
- (*x->ex_search_count_ptr <= max_ex) &&
- !cpi->rc.is_src_frame_alt_ref;
+ (sf->exhaustive_searches_thresh < INT_MAX) &&
+ (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
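
is_exhaustive_allowed therefore gates the mesh search behind a budget: at most max(MIN_EX_SEARCH_LIMIT, max_exaustive_pct percent of the regular full-pel searches so far), and never on alt-ref source frames. A worked example with assumed counter values (the real inputs come from the speed features and the per-macroblock counters):

#include <stdio.h>

#define MIN_EX_SEARCH_LIMIT 128 /* same constant as above */

int main(void) {
  const int m_search_count = 1000;  /* assumed: full-pel searches so far */
  const int max_exaustive_pct = 10; /* assumed speed-feature setting */
  const int pct_limit = (m_search_count * max_exaustive_pct) / 100;
  const int max_ex =
      pct_limit > MIN_EX_SEARCH_LIMIT ? pct_limit : MIN_EX_SEARCH_LIMIT;
  printf("exhaustive searches allowed so far: %d\n", max_ex); /* 128 */
  return 0;
}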
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, MV *mvp_full,
- int step_param, int error_per_bit,
- int *cost_list,
- const MV *ref_mv, MV *tmp_mv,
- int var_max, int rd) {
+int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ MV *mvp_full, int step_param, int error_per_bit,
+ int *cost_list, const MV *ref_mv, MV *tmp_mv,
+ int var_max, int rd) {
const SPEED_FEATURES *const sf = &cpi->sf;
const SEARCH_METHODS method = sf->mv.search_method;
vpx_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
@@ -2441,42 +2460,41 @@
switch (method) {
case FAST_DIAMOND:
var = vp10_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case FAST_HEX:
var = vp10_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case HEX:
var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case SQUARE:
var = vp10_square_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case BIGDIA:
var = vp10_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
- cost_list, fn_ptr, 1, ref_mv, tmp_mv);
+ cost_list, fn_ptr, 1, ref_mv, tmp_mv);
break;
case NSTEP:
var = vp10_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
- MAX_MVSEARCH_STEPS - 1 - step_param,
- 1, cost_list, fn_ptr, ref_mv, tmp_mv);
+ MAX_MVSEARCH_STEPS - 1 - step_param, 1,
+ cost_list, fn_ptr, ref_mv, tmp_mv);
// Should we allow a follow-on exhaustive search?
if (is_exhaustive_allowed(cpi, x)) {
int64_t exhuastive_thr = sf->exhaustive_searches_thresh;
- exhuastive_thr >>= 8 - (b_width_log2_lookup[bsize] +
- b_height_log2_lookup[bsize]);
+ exhuastive_thr >>=
+ 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
// Threshold variance for an exhaustive full search.
if (var > exhuastive_thr) {
- int var_ex;
+ int var_ex;
MV tmp_mv_ex;
- var_ex = full_pixel_exhaustive(cpi, x, tmp_mv,
- error_per_bit, cost_list, fn_ptr,
- ref_mv, &tmp_mv_ex);
+ var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, error_per_bit,
+ cost_list, fn_ptr, ref_mv, &tmp_mv_ex);
if (var_ex < var) {
var = var_ex;
@@ -2487,8 +2505,7 @@
break;
break;
- default:
- assert(0 && "Invalid search method.");
+ default: assert(0 && "Invalid search method.");
}
if (method != NSTEP && rd && var < var_max)
diff --git a/vp10/encoder/mcomp.h b/vp10/encoder/mcomp.h
index 7597981..e17264b 100644
--- a/vp10/encoder/mcomp.h
+++ b/vp10/encoder/mcomp.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_MCOMP_H_
#define VP10_ENCODER_MCOMP_H_
@@ -26,7 +25,7 @@
// Enable the use of motion vectors in the range [-1023, 1023].
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1)
// Maximum size of the first step in full pel units
-#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
+#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1))
// Allowed motion vector pixel distance outside image border
// for Block_16x16
#define BORDER_MV_PIXELS_B16 (16 + VPX_INTERP_EXTEND)
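
A compile-time sanity check of the two macros above (C11 static_assert; kMaxMvSearchSteps == 11 is an assumption consistent with the [-1023, 1023] comment, not a value taken from this diff):

#include <assert.h>

enum { kMaxMvSearchSteps = 11 }; /* assumed MAX_MVSEARCH_STEPS */
static_assert(((1 << (kMaxMvSearchSteps - 1)) - 1) == 1023,
              "MAX_FULL_PEL_VAL spans [-1023, 1023]");
static_assert((1 << (kMaxMvSearchSteps - 1)) == 1024,
              "MAX_FIRST_STEP is one larger than MAX_FULL_PEL_VAL");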
@@ -44,59 +43,48 @@
} search_site_config;
void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref,
- const int *mvjcost, int *mvcost[2], int weight);
+int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+ int *mvcost[2], int weight);
// Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x,
- const MV *best_mv, const MV *center_mv,
- const uint8_t *second_pred,
- const vpx_variance_fn_ptr_t *vfp,
- int use_mvcost);
+int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const vpx_variance_fn_ptr_t *vfp,
+ int use_mvcost);
+int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+ const MV *center_mv, const uint8_t *second_pred,
+ const vpx_variance_fn_ptr_t *vfp, int use_mvcost);
struct VP10_COMP;
struct SPEED_FEATURES;
int vp10_init_search_range(int size);
-int vp10_refining_search_sad(const struct macroblock *x,
- struct mv *ref_mv,
- int sad_per_bit, int distance,
- const struct vpx_variance_vtable *fn_ptr,
- const struct mv *center_mv);
+int vp10_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
+ int sad_per_bit, int distance,
+ const struct vpx_variance_vtable *fn_ptr,
+ const struct mv *center_mv);
// Runs a sequence of diamond searches in smaller steps for RD.
int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
- MV *mvp_full, int step_param,
- int sadpb, int further_steps, int do_refine,
- int *cost_list,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *ref_mv, MV *dst_mv);
+ MV *mvp_full, int step_param, int sadpb,
+ int further_steps, int do_refine, int *cost_list,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *ref_mv, MV *dst_mv);
// Perform integral-projection-based motion estimation.
unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
- MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col);
+ MACROBLOCK *x, BLOCK_SIZE bsize,
+ int mi_row, int mi_col);
-typedef int (integer_mv_pattern_search_fn) (
- const MACROBLOCK *x,
- MV *ref_mv,
- int search_param,
- int error_per_bit,
- int do_init_search,
- int *cost_list,
- const vpx_variance_fn_ptr_t *vf,
- int use_mvcost,
- const MV *center_mv,
- MV *best_mv);
+typedef int(integer_mv_pattern_search_fn)(const MACROBLOCK *x, MV *ref_mv,
+ int search_param, int error_per_bit,
+ int do_init_search, int *cost_list,
+ const vpx_variance_fn_ptr_t *vf,
+ int use_mvcost, const MV *center_mv,
+ MV *best_mv);
integer_mv_pattern_search_fn vp10_hex_search;
integer_mv_pattern_search_fn vp10_bigdia_search;
@@ -104,59 +92,45 @@
integer_mv_pattern_search_fn vp10_fast_hex_search;
integer_mv_pattern_search_fn vp10_fast_dia_search;
-typedef int (fractional_mv_step_fp) (
- const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vpx_variance_fn_ptr_t *vfp,
+typedef int(fractional_mv_step_fp)(
+ const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
+ int error_per_bit, const vpx_variance_fn_ptr_t *vfp,
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion, unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h);
+ int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
+ int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
+ int h);
extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x,
- const MV *ref_mv, int sad_per_bit,
- int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, MV *best_mv);
+typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+ int sad_per_bit, int distance,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, MV *best_mv);
-typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x,
- MV *ref_mv, int sad_per_bit,
- int distance,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv);
+typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
+ int sad_per_bit, int distance,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv);
-typedef int (*vp10_diamond_search_fn_t)(const MACROBLOCK *x,
- const search_site_config *cfg,
- MV *ref_mv, MV *best_mv,
- int search_param, int sad_per_bit,
- int *num00,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv);
+typedef int (*vp10_diamond_search_fn_t)(
+ const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
+ int search_param, int sad_per_bit, int *num00,
+ const vpx_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
-int vp10_refining_search_8p_c(const MACROBLOCK *x,
- MV *ref_mv, int error_per_bit,
- int search_range,
- const vpx_variance_fn_ptr_t *fn_ptr,
- const MV *center_mv, const uint8_t *second_pred);
+int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+ int error_per_bit, int search_range,
+ const vpx_variance_fn_ptr_t *fn_ptr,
+ const MV *center_mv, const uint8_t *second_pred);
struct VP10_COMP;
int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize, MV *mvp_full,
- int step_param, int error_per_bit,
- int *cost_list,
- const MV *ref_mv, MV *tmp_mv,
- int var_max, int rd);
+ BLOCK_SIZE bsize, MV *mvp_full, int step_param,
+ int error_per_bit, int *cost_list, const MV *ref_mv,
+ MV *tmp_mv, int var_max, int rd);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp10/encoder/mips/msa/error_msa.c b/vp10/encoder/mips/msa/error_msa.c
index dacca32..9a098ea 100644
--- a/vp10/encoder/mips/msa/error_msa.c
+++ b/vp10/encoder/mips/msa/error_msa.c
@@ -11,100 +11,93 @@
#include "./vp10_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
-#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \
-static int64_t block_error_##BSize##size_msa(const int16_t *coeff_ptr, \
- const int16_t *dq_coeff_ptr, \
- int64_t *ssz) { \
- int64_t err = 0; \
- uint32_t loop_cnt; \
- v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \
- v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \
- v2i64 sq_coeff_r, sq_coeff_l; \
- v2i64 err0, err_dup0, err1, err_dup1; \
- \
- coeff = LD_SH(coeff_ptr); \
- dq_coeff = LD_SH(dq_coeff_ptr); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, \
- sq_coeff_r, sq_coeff_l); \
- DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \
- \
- coeff = LD_SH(coeff_ptr + 8); \
- dq_coeff = LD_SH(dq_coeff_ptr + 8); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff_ptr += 16; \
- dq_coeff_ptr += 16; \
- \
- for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \
- coeff = LD_SH(coeff_ptr); \
- dq_coeff = LD_SH(dq_coeff_ptr); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff = LD_SH(coeff_ptr + 8); \
- dq_coeff = LD_SH(dq_coeff_ptr + 8); \
- UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
- ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
- HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
- DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
- DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
- \
- coeff_ptr += 16; \
- dq_coeff_ptr += 16; \
- } \
- \
- err_dup0 = __msa_splati_d(sq_coeff_r, 1); \
- err_dup1 = __msa_splati_d(sq_coeff_l, 1); \
- sq_coeff_r += err_dup0; \
- sq_coeff_l += err_dup1; \
- *ssz = __msa_copy_s_d(sq_coeff_r, 0); \
- *ssz += __msa_copy_s_d(sq_coeff_l, 0); \
- \
- err_dup0 = __msa_splati_d(err0, 1); \
- err_dup1 = __msa_splati_d(err1, 1); \
- err0 += err_dup0; \
- err1 += err_dup1; \
- err = __msa_copy_s_d(err0, 0); \
- err += __msa_copy_s_d(err1, 0); \
- \
- return err; \
-}
+#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \
+ static int64_t block_error_##BSize##size_msa( \
+ const int16_t *coeff_ptr, const int16_t *dq_coeff_ptr, int64_t *ssz) { \
+ int64_t err = 0; \
+ uint32_t loop_cnt; \
+ v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \
+ v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \
+ v2i64 sq_coeff_r, sq_coeff_l; \
+ v2i64 err0, err_dup0, err1, err_dup1; \
+ \
+ coeff = LD_SH(coeff_ptr); \
+ dq_coeff = LD_SH(dq_coeff_ptr); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, sq_coeff_r, \
+ sq_coeff_l); \
+ DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \
+ \
+ coeff = LD_SH(coeff_ptr + 8); \
+ dq_coeff = LD_SH(dq_coeff_ptr + 8); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff_ptr += 16; \
+ dq_coeff_ptr += 16; \
+ \
+ for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \
+ coeff = LD_SH(coeff_ptr); \
+ dq_coeff = LD_SH(dq_coeff_ptr); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff = LD_SH(coeff_ptr + 8); \
+ dq_coeff = LD_SH(dq_coeff_ptr + 8); \
+ UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \
+ ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \
+ HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \
+ DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \
+ DPADD_SD2_SD(diff_r, diff_l, err0, err1); \
+ \
+ coeff_ptr += 16; \
+ dq_coeff_ptr += 16; \
+ } \
+ \
+ err_dup0 = __msa_splati_d(sq_coeff_r, 1); \
+ err_dup1 = __msa_splati_d(sq_coeff_l, 1); \
+ sq_coeff_r += err_dup0; \
+ sq_coeff_l += err_dup1; \
+ *ssz = __msa_copy_s_d(sq_coeff_r, 0); \
+ *ssz += __msa_copy_s_d(sq_coeff_l, 0); \
+ \
+ err_dup0 = __msa_splati_d(err0, 1); \
+ err_dup1 = __msa_splati_d(err1, 1); \
+ err0 += err_dup0; \
+ err1 += err_dup1; \
+ err = __msa_copy_s_d(err0, 0); \
+ err += __msa_copy_s_d(err1, 0); \
+ \
+ return err; \
+ }
-BLOCK_ERROR_BLOCKSIZE_MSA(16);
-BLOCK_ERROR_BLOCKSIZE_MSA(64);
-BLOCK_ERROR_BLOCKSIZE_MSA(256);
-BLOCK_ERROR_BLOCKSIZE_MSA(1024);
+/* clang-format off */
+BLOCK_ERROR_BLOCKSIZE_MSA(16)
+BLOCK_ERROR_BLOCKSIZE_MSA(64)
+BLOCK_ERROR_BLOCKSIZE_MSA(256)
+BLOCK_ERROR_BLOCKSIZE_MSA(1024)
+/* clang-format on */
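/* The guards above are needed because the invocations now end without
 * semicolons (each macro already expands to a complete function definition),
 * so clang-format has no statement boundaries to work with; the off/on
 * markers keep the lines exactly as written. General shape of the idiom, on
 * a hypothetical table whose 2-D layout is worth preserving (values are
 * arbitrary):
 *
 *   // clang-format off
 *   static const int kExampleGrid[16] = {
 *     0, 1, 2, 3,
 *     1, 2, 3, 4,
 *     2, 3, 4, 5,
 *     3, 4, 5, 6
 *   };
 *   // clang-format on
 */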
int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
- const tran_low_t *dq_coeff_ptr,
- intptr_t blk_size, int64_t *ssz) {
+ const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
+ int64_t *ssz) {
int64_t err;
const int16_t *coeff = (const int16_t *)coeff_ptr;
const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr;
switch (blk_size) {
- case 16:
- err = block_error_16size_msa(coeff, dq_coeff, ssz);
- break;
- case 64:
- err = block_error_64size_msa(coeff, dq_coeff, ssz);
- break;
- case 256:
- err = block_error_256size_msa(coeff, dq_coeff, ssz);
- break;
- case 1024:
- err = block_error_1024size_msa(coeff, dq_coeff, ssz);
- break;
+ case 16: err = block_error_16size_msa(coeff, dq_coeff, ssz); break;
+ case 64: err = block_error_64size_msa(coeff, dq_coeff, ssz); break;
+ case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
+ case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
default:
err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
break;
diff --git a/vp10/encoder/mips/msa/fdct16x16_msa.c b/vp10/encoder/mips/msa/fdct16x16_msa.c
index d78fc64..c39a292 100644
--- a/vp10/encoder/mips/msa/fdct16x16_msa.c
+++ b/vp10/encoder/mips/msa/fdct16x16_msa.c
@@ -159,8 +159,8 @@
/* load input data */
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
FDCT_POSTPROC_2V_NEG_H(r0, r1);
FDCT_POSTPROC_2V_NEG_H(r2, r3);
FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -169,8 +169,8 @@
out += 64;
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
FDCT_POSTPROC_2V_NEG_H(r8, r9);
FDCT_POSTPROC_2V_NEG_H(r10, r11);
FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -181,8 +181,8 @@
/* load input data */
input += 128;
LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
FDCT_POSTPROC_2V_NEG_H(r0, r1);
FDCT_POSTPROC_2V_NEG_H(r2, r3);
FDCT_POSTPROC_2V_NEG_H(r4, r5);
@@ -191,8 +191,8 @@
out += 64;
LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
FDCT_POSTPROC_2V_NEG_H(r8, r9);
FDCT_POSTPROC_2V_NEG_H(r10, r11);
FDCT_POSTPROC_2V_NEG_H(r12, r13);
@@ -339,24 +339,24 @@
v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
/* load input data */
- LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
- l4, l12, l5, l13, l6, l14, l7, l15);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+ l7, l15);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
out += 16 * 8;
/* load input data */
input += 128;
- LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11,
- l4, l12, l5, l13, l6, l14, l7, l15);
- TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7,
- r0, r1, r2, r3, r4, r5, r6, r7);
- TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15,
- r8, r9, r10, r11, r12, r13, r14, r15);
+ LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14,
+ l7, l15);
+ TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6,
+ r7);
+ TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11,
+ r12, r13, r14, r15);
ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);
ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);
}
@@ -371,10 +371,10 @@
LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);
temp = intermediate + 8;
LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15,
- in8, in9, in10, in11, in12, in13, in14, in15);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
+ in10, in11, in12, in13, in14, in15);
FDCT_POSTPROC_2V_NEG_H(in0, in1);
FDCT_POSTPROC_2V_NEG_H(in2, in3);
FDCT_POSTPROC_2V_NEG_H(in4, in5);
@@ -383,29 +383,28 @@
FDCT_POSTPROC_2V_NEG_H(in10, in11);
FDCT_POSTPROC_2V_NEG_H(in12, in13);
FDCT_POSTPROC_2V_NEG_H(in14, in15);
- BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7,
- in8, in9, in10, in11, in12, in13, in14, in15,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- in8, in9, in10, in11, in12, in13, in14, in15);
+ BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
+ in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
+ tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
temp = intermediate;
ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);
- FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
- tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
+ FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
+ tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
temp = intermediate;
LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);
- FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3,
- tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3);
+ FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
+ in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
+ tmp1, in1, tmp2, in2, tmp3, in3);
ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);
- TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7,
- tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7);
+ TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
+ tmp5, in5, tmp6, in6, tmp7, in7);
out = output + 8;
ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
}
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output,
- int32_t stride, int32_t tx_type) {
+void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+ int32_t tx_type) {
DECLARE_ALIGNED(32, int16_t, tmp[256]);
DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
DECLARE_ALIGNED(32, int16_t, tmp_buf[128]);
@@ -413,35 +412,31 @@
int16_t *ptmpbuf = &tmp_buf[0];
int16_t *trans = &trans_buf[0];
const int32_t const_arr[29 * 4] = {
- 52707308, 52707308, 52707308, 52707308,
- -1072430300, -1072430300, -1072430300, -1072430300,
- 795618043, 795618043, 795618043, 795618043,
- -721080468, -721080468, -721080468, -721080468,
- 459094491, 459094491, 459094491, 459094491,
- -970646691, -970646691, -970646691, -970646691,
- 1010963856, 1010963856, 1010963856, 1010963856,
- -361743294, -361743294, -361743294, -361743294,
- 209469125, 209469125, 209469125, 209469125,
- -1053094788, -1053094788, -1053094788, -1053094788,
- 1053160324, 1053160324, 1053160324, 1053160324,
- 639644520, 639644520, 639644520, 639644520,
- -862444000, -862444000, -862444000, -862444000,
- 1062144356, 1062144356, 1062144356, 1062144356,
- -157532337, -157532337, -157532337, -157532337,
- 260914709, 260914709, 260914709, 260914709,
- -1041559667, -1041559667, -1041559667, -1041559667,
- 920985831, 920985831, 920985831, 920985831,
- -551995675, -551995675, -551995675, -551995675,
- 596522295, 596522295, 596522295, 596522295,
- 892853362, 892853362, 892853362, 892853362,
- -892787826, -892787826, -892787826, -892787826,
- 410925857, 410925857, 410925857, 410925857,
- -992012162, -992012162, -992012162, -992012162,
- 992077698, 992077698, 992077698, 992077698,
- 759246145, 759246145, 759246145, 759246145,
- -759180609, -759180609, -759180609, -759180609,
- -759222975, -759222975, -759222975, -759222975,
- 759288511, 759288511, 759288511, 759288511 };
+ 52707308, 52707308, 52707308, 52707308, -1072430300,
+ -1072430300, -1072430300, -1072430300, 795618043, 795618043,
+ 795618043, 795618043, -721080468, -721080468, -721080468,
+ -721080468, 459094491, 459094491, 459094491, 459094491,
+ -970646691, -970646691, -970646691, -970646691, 1010963856,
+ 1010963856, 1010963856, 1010963856, -361743294, -361743294,
+ -361743294, -361743294, 209469125, 209469125, 209469125,
+ 209469125, -1053094788, -1053094788, -1053094788, -1053094788,
+ 1053160324, 1053160324, 1053160324, 1053160324, 639644520,
+ 639644520, 639644520, 639644520, -862444000, -862444000,
+ -862444000, -862444000, 1062144356, 1062144356, 1062144356,
+ 1062144356, -157532337, -157532337, -157532337, -157532337,
+ 260914709, 260914709, 260914709, 260914709, -1041559667,
+ -1041559667, -1041559667, -1041559667, 920985831, 920985831,
+ 920985831, 920985831, -551995675, -551995675, -551995675,
+ -551995675, 596522295, 596522295, 596522295, 596522295,
+ 892853362, 892853362, 892853362, 892853362, -892787826,
+ -892787826, -892787826, -892787826, 410925857, 410925857,
+ 410925857, 410925857, -992012162, -992012162, -992012162,
+ -992012162, 992077698, 992077698, 992077698, 992077698,
+ 759246145, 759246145, 759246145, 759246145, -759180609,
+ -759180609, -759180609, -759180609, -759222975, -759222975,
+ -759222975, -759222975, 759288511, 759288511, 759288511,
+ 759288511
+ };
switch (tx_type) {
case DCT_DCT:
@@ -500,8 +495,6 @@
fadst16_transpose_msa(tmp, output);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
diff --git a/vp10/encoder/mips/msa/fdct4x4_msa.c b/vp10/encoder/mips/msa/fdct4x4_msa.c
index f9028f0..def9b45 100644
--- a/vp10/encoder/mips/msa/fdct4x4_msa.c
+++ b/vp10/encoder/mips/msa/fdct4x4_msa.c
@@ -14,7 +14,7 @@
#include "vp10/encoder/mips/msa/fdct_msa.h"
void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
- int32_t src_stride) {
+ int32_t src_stride) {
v8i16 in0, in1, in2, in3, in4;
LD_SH4(input, src_stride, in0, in1, in2, in3);
@@ -46,7 +46,7 @@
}
void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
- int32_t tx_type) {
+ int32_t tx_type) {
v8i16 in0, in1, in2, in3;
LD_SH4(input, stride, in0, in1, in2, in3);
@@ -86,9 +86,7 @@
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
VPX_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
diff --git a/vp10/encoder/mips/msa/fdct8x8_msa.c b/vp10/encoder/mips/msa/fdct8x8_msa.c
index 8e1e111..7843a62 100644
--- a/vp10/encoder/mips/msa/fdct8x8_msa.c
+++ b/vp10/encoder/mips/msa/fdct8x8_msa.c
@@ -14,7 +14,7 @@
#include "vp10/encoder/mips/msa/fdct_msa.h"
void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
- int32_t tx_type) {
+ int32_t tx_type) {
v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
@@ -23,44 +23,42 @@
switch (tx_type) {
case DCT_DCT:
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case ADST_DCT:
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case DCT_ADST:
- VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VPX_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
case ADST_ADST:
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
- VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
+ in3, in4, in5, in6, in7);
+ VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
+ in5, in6, in7);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
- TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
- in0, in1, in2, in3, in4, in5, in6, in7);
+ TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
+ in4, in5, in6, in7);
SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
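/* Every case above is the same separable 2-D pattern: a 1-D transform down
 * one axis, a transpose, a (possibly different) 1-D transform down the other
 * axis, and a final transpose back to raster order. A generic sketch of that
 * composition (illustrative helpers, double math instead of the fixed-point
 * SIMD): */
static void transpose_sq(int n, double *m) {
  int r, c;
  for (r = 0; r < n; ++r)
    for (c = r + 1; c < n; ++c) {
      const double tmp = m[r * n + c];
      m[r * n + c] = m[c * n + r];
      m[c * n + r] = tmp;
    }
}

static void separable_2d(int n, double *m, void (*first_txfm)(int, double *),
                         void (*second_txfm)(int, double *)) {
  first_txfm(n, m);   /* first axis */
  transpose_sq(n, m); /* rows <-> columns */
  second_txfm(n, m);  /* second axis (ADST vs DCT for the hybrid types) */
  transpose_sq(n, m); /* restore orientation */
}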
diff --git a/vp10/encoder/mips/msa/fdct_msa.h b/vp10/encoder/mips/msa/fdct_msa.h
index bf56251..9a14625 100644
--- a/vp10/encoder/mips/msa/fdct_msa.h
+++ b/vp10/encoder/mips/msa/fdct_msa.h
@@ -15,103 +15,102 @@
#include "vpx_dsp/mips/txfm_macros_msa.h"
#include "vpx_ports/mem.h"
-#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7) { \
- v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
- v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
- v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
- cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
- v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
- cospi_24_64, -cospi_24_64, 0, 0 }; \
- \
- SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
- cnst2_m = -cnst0_m; \
- ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
- SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
- cnst4_m = -cnst2_m; \
- ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
- \
- ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
- ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in7, in0, \
- in4, in3); \
- \
- SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
- cnst2_m = -cnst0_m; \
- ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
- SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
- cnst4_m = -cnst2_m; \
- ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
- \
- ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
- ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
- \
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst1_m, cnst2_m, cnst3_m, in5, in2, \
- in6, in1); \
- BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
- out7 = -s0_m; \
- out0 = s1_m; \
- \
- SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
- \
- ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
- cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- cnst1_m = cnst0_m; \
- \
- ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
- ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
- DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
- cnst2_m, cnst3_m, cnst1_m, out1, out6, \
- s0_m, s1_m); \
- \
- SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
- cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
- \
- ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
- ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
- out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
- out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
- out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
- out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
- \
- out1 = -out1; \
- out3 = -out3; \
- out5 = -out5; \
-}
+#define VPX_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \
+ out3, out4, out5, out6, out7) \
+ { \
+ v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
+ v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
+ v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
+ cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
+ v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
+ cospi_24_64, -cospi_24_64, 0, 0 }; \
+ \
+ SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
+ cnst2_m = -cnst0_m; \
+ ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
+ SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
+ cnst4_m = -cnst2_m; \
+ ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ \
+ ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
+ cnst2_m, cnst3_m, in7, in0, in4, in3); \
+ \
+ SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
+ cnst2_m = -cnst0_m; \
+ ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
+ SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
+ cnst4_m = -cnst2_m; \
+ ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
+ \
+ ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
+ \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \
+ cnst2_m, cnst3_m, in5, in2, in6, in1); \
+ BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
+ out7 = -s0_m; \
+ out0 = s1_m; \
+ \
+ SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
+ \
+ ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
+ cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
+ cnst1_m = cnst0_m; \
+ \
+ ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
+ ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
+ DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m, \
+ cnst3_m, cnst1_m, out1, out6, s0_m, s1_m); \
+ \
+ SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
+ cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
+ \
+ ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
+ ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
+ out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
+ out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
+ out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
+ out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
+ \
+ out1 = -out1; \
+ out3 = -out3; \
+ out5 = -out5; \
+ }
-#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \
- v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
- v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
- \
- UNPCK_R_SH_SW(in0, in0_r_m); \
- UNPCK_R_SH_SW(in1, in1_r_m); \
- UNPCK_R_SH_SW(in2, in2_r_m); \
- UNPCK_R_SH_SW(in3, in3_r_m); \
- \
- constant_m = __msa_fill_w(sinpi_4_9); \
- MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \
- \
- constant_m = __msa_fill_w(sinpi_1_9); \
- s0_m += in0_r_m * constant_m; \
- s1_m -= in1_r_m * constant_m; \
- \
- constant_m = __msa_fill_w(sinpi_2_9); \
- s0_m += in1_r_m * constant_m; \
- s1_m += in3_r_m * constant_m; \
- \
- s2_m = in0_r_m + in1_r_m - in3_r_m; \
- \
- constant_m = __msa_fill_w(sinpi_3_9); \
- MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \
- \
- in0_r_m = s0_m + s3_m; \
- s2_m = s1_m - s3_m; \
- s3_m = s1_m - s0_m + s3_m; \
- \
- SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
- PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, \
- s3_m, s3_m, out0, out1, out2, out3); \
-}
-#endif // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#define VPX_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \
+ { \
+ v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \
+ v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \
+ \
+ UNPCK_R_SH_SW(in0, in0_r_m); \
+ UNPCK_R_SH_SW(in1, in1_r_m); \
+ UNPCK_R_SH_SW(in2, in2_r_m); \
+ UNPCK_R_SH_SW(in3, in3_r_m); \
+ \
+ constant_m = __msa_fill_w(sinpi_4_9); \
+ MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \
+ \
+ constant_m = __msa_fill_w(sinpi_1_9); \
+ s0_m += in0_r_m * constant_m; \
+ s1_m -= in1_r_m * constant_m; \
+ \
+ constant_m = __msa_fill_w(sinpi_2_9); \
+ s0_m += in1_r_m * constant_m; \
+ s1_m += in3_r_m * constant_m; \
+ \
+ s2_m = in0_r_m + in1_r_m - in3_r_m; \
+ \
+ constant_m = __msa_fill_w(sinpi_3_9); \
+ MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \
+ \
+ in0_r_m = s0_m + s3_m; \
+ s2_m = s1_m - s3_m; \
+ s3_m = s1_m - s0_m + s3_m; \
+ \
+ SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \
+ PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
+ out0, out1, out2, out3); \
+ }
+#endif // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
diff --git a/vp10/encoder/mips/msa/temporal_filter_msa.c b/vp10/encoder/mips/msa/temporal_filter_msa.c
index 5d4558b..5feeab3 100644
--- a/vp10/encoder/mips/msa/temporal_filter_msa.c
+++ b/vp10/encoder/mips/msa/temporal_filter_msa.c
@@ -11,12 +11,9 @@
#include "./vp10_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
-static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
- uint32_t stride,
- uint8_t *frm2_ptr,
- int32_t filt_sth,
- int32_t filt_wgt,
- uint32_t *acc,
+static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
+ uint8_t *frm2_ptr, int32_t filt_sth,
+ int32_t filt_wgt, uint32_t *acc,
uint16_t *cnt) {
uint32_t row;
uint64_t f0, f1, f2, f3;
@@ -54,10 +51,10 @@
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
- MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
- diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -65,8 +62,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -85,8 +82,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -101,10 +98,10 @@
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
- MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
- diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
+ MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -112,8 +109,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -131,8 +128,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -141,13 +138,10 @@
}
}
-static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
- uint32_t stride,
+static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
uint8_t *frm2_ptr,
- int32_t filt_sth,
- int32_t filt_wgt,
- uint32_t *acc,
- uint16_t *cnt) {
+ int32_t filt_sth, int32_t filt_wgt,
+ uint32_t *acc, uint16_t *cnt) {
uint32_t row;
v16i8 frm1, frm2, frm3, frm4;
v16u8 frm_r, frm_l;
@@ -183,8 +177,8 @@
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -192,8 +186,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -212,8 +206,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -230,8 +224,8 @@
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
- MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
+ mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -239,8 +233,8 @@
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
- SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
+ mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -259,8 +253,8 @@
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
- ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
- mod0_w, mod1_w, mod2_w, mod3_w);
+ ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
+ mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
ST_SW2(mod2_w, mod3_w, acc, 4);
@@ -272,18 +266,18 @@
}
void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
- uint8_t *frame2_ptr, uint32_t blk_w,
- uint32_t blk_h, int32_t strength,
- int32_t filt_wgt, uint32_t *accu,
- uint16_t *cnt) {
+ uint8_t *frame2_ptr, uint32_t blk_w,
+ uint32_t blk_h, int32_t strength,
+ int32_t filt_wgt, uint32_t *accu,
+ uint16_t *cnt) {
if (8 == (blk_w * blk_h)) {
- temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr,
- strength, filt_wgt, accu, cnt);
+ temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
+ filt_wgt, accu, cnt);
} else if (16 == (blk_w * blk_h)) {
- temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr,
- strength, filt_wgt, accu, cnt);
+ temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
+ filt_wgt, accu, cnt);
} else {
vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
- strength, filt_wgt, accu, cnt);
+ strength, filt_wgt, accu, cnt);
}
}
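/* Per pixel, both MSA paths implement the same weighting as the scalar
 * fallback dispatched above; a rough sketch of the per-pixel math, assuming
 * the usual vpx temporal-filter scheme (the cnst3/cnst16 vectors and the
 * compare-and-mask in the SIMD code correspond to the *3, the clamp at 16
 * and the 16 - mod below): */
static void toy_filter_px(int frm1_px, int frm2_px, int strength,
                          int filt_wgt, unsigned int *acc,
                          unsigned short *cnt) {
  const int diff = frm2_px - frm1_px;
  int mod = (diff * diff * 3) >> strength; /* SRAR_W4_SW is a rounded shift */
  mod = (mod < 16) ? 16 - mod : 0;         /* small diffs get large weights */
  mod *= filt_wgt;
  *cnt += (unsigned short)mod;
  *acc += (unsigned int)(mod * frm2_px);
}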
diff --git a/vp10/encoder/picklpf.c b/vp10/encoder/picklpf.c
index b8e692e..0550df8 100644
--- a/vp10/encoder/picklpf.c
+++ b/vp10/encoder/picklpf.c
@@ -34,20 +34,19 @@
}
}
-
static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
- VP10_COMP *const cpi,
- int filt_level, int partial_frame) {
+ VP10_COMP *const cpi, int filt_level,
+ int partial_frame) {
VP10_COMMON *const cm = &cpi->common;
int64_t filt_err;
if (cpi->num_workers > 1)
vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
- filt_level, 1, partial_frame,
- cpi->workers, cpi->num_workers, &cpi->lf_row_sync);
+ filt_level, 1, partial_frame, cpi->workers,
+ cpi->num_workers, &cpi->lf_row_sync);
else
vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
- 1, partial_frame);
+ 1, partial_frame);
#if CONFIG_VPX_HIGHBITDEPTH
if (cm->use_highbitdepth) {
@@ -103,8 +102,7 @@
bias = (bias * cpi->twopass.section_intra_rating) / 20;
// yx, bias less for large block size
- if (cm->tx_mode != ONLY_4X4)
- bias >>= 1;
+ if (cm->tx_mode != ONLY_4X4) bias >>= 1;
if (filt_direction <= 0 && filt_low != filt_mid) {
// Get Low filter error score
@@ -115,8 +113,7 @@
// filter value.
if ((ss_err[filt_low] - bias) < best_err) {
// Was it actually better than the previous best?
- if (ss_err[filt_low] < best_err)
- best_err = ss_err[filt_low];
+ if (ss_err[filt_low] < best_err) best_err = ss_err[filt_low];
filt_best = filt_low;
}
@@ -148,21 +145,20 @@
}
void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
- LPF_PICK_METHOD method) {
+ LPF_PICK_METHOD method) {
VP10_COMMON *const cm = &cpi->common;
struct loopfilter *const lf = &cm->lf;
- lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0
- : cpi->oxcf.sharpness;
+ lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) {
- lf->filter_level = 0;
+ lf->filter_level = 0;
} else if (method >= LPF_PICK_FROM_Q) {
const int min_filter_level = 0;
const int max_filter_level = get_max_filter_level(cpi);
const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
- // These values were determined by linear fitting the result of the
- // searched level, filt_guess = q * 0.316206 + 3.87252
+// These values were determined by linear fitting the result of the
+// searched level, filt_guess = q * 0.316206 + 3.87252
#if CONFIG_VPX_HIGHBITDEPTH
int filt_guess;
switch (cm->bit_depth) {
@@ -176,18 +172,18 @@
filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22);
break;
default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
- "or VPX_BITS_12");
+ assert(0 &&
+ "bit_depth should be VPX_BITS_8, VPX_BITS_10 "
+ "or VPX_BITS_12");
return;
}
#else
int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
#endif // CONFIG_VPX_HIGHBITDEPTH
- if (cm->frame_type == KEY_FRAME)
- filt_guess -= 4;
+ if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
} else {
- lf->filter_level = search_filter_level(sd, cpi,
- method == LPF_PICK_FROM_SUBIMAGE);
+ lf->filter_level =
+ search_filter_level(sd, cpi, method == LPF_PICK_FROM_SUBIMAGE);
}
}
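/* Sanity check on the fixed-point constants above (standalone sketch, not
 * patch code): for 8-bit, (q * 20723 + 1015158) >> 18 is the Q18 encoding of
 * the comment's fit evaluated on the "real" Q scale, i.e.
 * filt_guess = (q / 4) * 0.316206 + 3.87252, since
 * 20723 / 2^18 ~= 0.316206 / 4 and 1015158 / 2^18 ~= 3.87252
 * (vp10_convert_qindex_to_q divides the 8-bit AC quant by 4). */
#include <stdio.h>

int main(void) {
  int q;
  for (q = 4; q <= 1828; q += 228) {
    const int fixed = (q * 20723 + 1015158 + (1 << 17)) >> 18; /* rounded */
    const double flt = (q / 4.0) * 0.316206 + 3.87252;
    printf("q=%4d fixed=%4d float=%7.2f\n", q, fixed, flt);
  }
  return 0;
}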
diff --git a/vp10/encoder/picklpf.h b/vp10/encoder/picklpf.h
index 21a8758..7d5f167 100644
--- a/vp10/encoder/picklpf.h
+++ b/vp10/encoder/picklpf.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_PICKLPF_H_
#define VP10_ENCODER_PICKLPF_H_
@@ -22,7 +21,7 @@
struct VP10_COMP;
void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
- struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+ struct VP10_COMP *cpi, LPF_PICK_METHOD method);
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp10/encoder/quantize.c b/vp10/encoder/quantize.c
index 66a8e5f..0688a69 100644
--- a/vp10/encoder/quantize.c
+++ b/vp10/encoder/quantize.c
@@ -21,13 +21,12 @@
#include "vp10/encoder/rd.h"
void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan,
+ const int16_t *iscan) {
int i, eob = -1;
// TODO(jingning) Decide the need of these arguments after the
// quantization process is completed.
@@ -53,27 +52,21 @@
qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign;
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
}
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr,
- intptr_t count,
- int skip_block,
- const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan,
- const int16_t *iscan) {
+void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int i;
int eob = -1;
// TODO(jingning) Decide the need of these arguments after the
@@ -97,8 +90,7 @@
const uint32_t abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 16);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0];
- if (abs_qcoeff)
- eob = i;
+ if (abs_qcoeff) eob = i;
}
}
*eob_ptr = eob + 1;
@@ -108,14 +100,13 @@
// TODO(jingning) Refactor this file and combine functions with similar
// operations.
void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
- int skip_block,
- const int16_t *zbin_ptr, const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+ int skip_block, const int16_t *zbin_ptr,
+ const int16_t *round_ptr,
+ const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr,
+ tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+ const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
@@ -140,25 +131,19 @@
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
}
- if (tmp)
- eob = i;
+ if (tmp) eob = i;
}
}
*eob_ptr = eob + 1;
}
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr,
- intptr_t n_coeffs, int skip_block,
- const int16_t *zbin_ptr,
- const int16_t *round_ptr,
- const int16_t *quant_ptr,
- const int16_t *quant_shift_ptr,
- tran_low_t *qcoeff_ptr,
- tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr,
- uint16_t *eob_ptr,
- const int16_t *scan, const int16_t *iscan) {
+void vp10_highbd_quantize_fp_32x32_c(
+ const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
+ const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
+ const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+ tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+ const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
@@ -176,15 +161,14 @@
const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) {
- const int64_t tmp = abs_coeff
- + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
- abs_qcoeff = (uint32_t) ((tmp * quant_ptr[rc != 0]) >> 15);
+ const int64_t tmp =
+ abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1);
+ abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 15);
qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2;
}
- if (abs_qcoeff)
- eob = i;
+ if (abs_qcoeff) eob = i;
}
}
*eob_ptr = eob + 1;
@@ -192,37 +176,33 @@
#endif
void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
- const int16_t *scan, const int16_t *iscan) {
+ const int16_t *scan, const int16_t *iscan) {
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block),
- 16, x->skip_block,
+ vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block,
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
- BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, &p->eobs[block],
- scan, iscan);
+ BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant,
+ &p->eobs[block], scan, iscan);
return;
}
#endif
- vpx_quantize_b(BLOCK_OFFSET(p->coeff, block),
- 16, x->skip_block,
- p->zbin, p->round, p->quant, p->quant_shift,
+ vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin,
+ p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
- BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, &p->eobs[block], scan, iscan);
+ BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant, &p->eobs[block],
+ scan, iscan);
}
static void invert_quant(int16_t *quant, int16_t *shift, int d) {
unsigned t;
int l;
t = d;
- for (l = 0; t > 1; l++)
- t >>= 1;
+ for (l = 0; t > 1; l++) t >>= 1;
t = 1 + (1 << (16 + l)) / d;
*quant = (int16_t)(t - (1 << 16));
*shift = 1 << (16 - l);
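/* What invert_quant() encodes (standalone sketch, not patch code): with
 * l = floor(log2(d)), t = 1 + 2^(16+l)/d and shift = 2^(16-l), so
 * ((x * t) >> 16) * shift >> 16 ~= (x * (2^32 / d)) >> 32 = x / d --
 * division by the step size d via two multiplies. The stored quant is
 * t - 2^16, truncated to 16 bits; the quantize step adds the multiplicand
 * back before applying quant_shift, which reconstructs the x * t product. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int d = 52; /* hypothetical step size */
  unsigned t = d;
  int l;
  for (l = 0; t > 1; l++) t >>= 1; /* l = 5 for d = 52 */
  t = 1 + (1u << (16 + l)) / d;    /* reciprocal of d in Q(16+l) */
  {
    const uint32_t x = 30000;
    const uint32_t approx =
        (uint32_t)(((((uint64_t)x * t) >> 16) << (16 - l)) >> 16);
    printf("exact=%u approx=%u\n", x / d, approx); /* both print 576 */
  }
  return 0;
}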
@@ -232,18 +212,15 @@
const int quant = vp10_dc_quant(q, 0, bit_depth);
#if CONFIG_VPX_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return q == 0 ? 64 : (quant < 148 ? 84 : 80);
- case VPX_BITS_10:
- return q == 0 ? 64 : (quant < 592 ? 84 : 80);
- case VPX_BITS_12:
- return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
+ case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80);
+ case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80);
+ case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80);
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
}
#else
- (void) bit_depth;
+ (void)bit_depth;
return q == 0 ? 64 : (quant < 148 ? 84 : 80);
#endif
}
@@ -259,8 +236,7 @@
for (i = 0; i < 2; ++i) {
int qrounding_factor_fp = i == 0 ? 48 : 42;
- if (q == 0)
- qrounding_factor_fp = 64;
+ if (q == 0) qrounding_factor_fp = 64;
// y
quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
@@ -275,8 +251,8 @@
// uv
quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
: vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
- invert_quant(&quants->uv_quant[q][i],
- &quants->uv_quant_shift[q][i], quant);
+ invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
+ quant);
quants->uv_quant_fp[q][i] = (1 << 16) / quant;
quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7);
@@ -364,14 +340,11 @@
// Table that converts 0-63 Q-range values passed in outside to the Qindex
// range used internally.
static const int quantizer_to_qindex[] = {
- 0, 4, 8, 12, 16, 20, 24, 28,
- 32, 36, 40, 44, 48, 52, 56, 60,
- 64, 68, 72, 76, 80, 84, 88, 92,
- 96, 100, 104, 108, 112, 116, 120, 124,
- 128, 132, 136, 140, 144, 148, 152, 156,
- 160, 164, 168, 172, 176, 180, 184, 188,
- 192, 196, 200, 204, 208, 212, 216, 220,
- 224, 228, 232, 236, 240, 244, 249, 255,
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48,
+ 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100,
+ 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
+ 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
+ 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
};
int vp10_quantizer_to_qindex(int quantizer) {
@@ -382,8 +355,7 @@
int quantizer;
for (quantizer = 0; quantizer < 64; ++quantizer)
- if (quantizer_to_qindex[quantizer] >= qindex)
- return quantizer;
+ if (quantizer_to_qindex[quantizer] >= qindex) return quantizer;
return 63;
}
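/* The reverse scan works because the table above is non-decreasing; a
 * standalone round-trip check (the helper mirrors the table: 4 * q for
 * q < 62, then 249 and 255): */
#include <assert.h>

static int q_to_qindex(int q) {
  return q < 62 ? q * 4 : (q == 62 ? 249 : 255);
}

static int qindex_to_q(int qindex) {
  int q;
  for (q = 0; q < 64; ++q)
    if (q_to_qindex(q) >= qindex) return q;
  return 63;
}

int main(void) {
  int q;
  for (q = 0; q < 64; ++q) assert(qindex_to_q(q_to_qindex(q)) == q);
  return 0;
}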
diff --git a/vp10/encoder/quantize.h b/vp10/encoder/quantize.h
index b44088e..29f044c 100644
--- a/vp10/encoder/quantize.h
+++ b/vp10/encoder/quantize.h
@@ -38,7 +38,7 @@
} QUANTS;
void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
- const int16_t *scan, const int16_t *iscan);
+ const int16_t *scan, const int16_t *iscan);
struct VP10_COMP;
struct VP10Common;
diff --git a/vp10/encoder/ratectrl.c b/vp10/encoder/ratectrl.c
index 1666b2e..73593cb 100644
--- a/vp10/encoder/ratectrl.c
+++ b/vp10/encoder/ratectrl.c
@@ -46,29 +46,24 @@
#define FRAME_OVERHEAD_BITS 200
#if CONFIG_VPX_HIGHBITDEPTH
-#define ASSIGN_MINQ_TABLE(bit_depth, name) \
- do { \
- switch (bit_depth) { \
- case VPX_BITS_8: \
- name = name##_8; \
- break; \
- case VPX_BITS_10: \
- name = name##_10; \
- break; \
- case VPX_BITS_12: \
- name = name##_12; \
- break; \
- default: \
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
- " or VPX_BITS_12"); \
- name = NULL; \
- } \
+#define ASSIGN_MINQ_TABLE(bit_depth, name) \
+ do { \
+ switch (bit_depth) { \
+ case VPX_BITS_8: name = name##_8; break; \
+ case VPX_BITS_10: name = name##_10; break; \
+ case VPX_BITS_12: name = name##_12; break; \
+ default: \
+ assert(0 && \
+ "bit_depth should be VPX_BITS_8, VPX_BITS_10" \
+ " or VPX_BITS_12"); \
+ name = NULL; \
+ } \
} while (0)
#else
#define ASSIGN_MINQ_TABLE(bit_depth, name) \
- do { \
- (void) bit_depth; \
- name = name##_8; \
+ do { \
+ (void) bit_depth; \
+ name = name##_8; \
} while (0)
#endif
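/* Aside: the do { ... } while (0) wrapper that both versions of the macro
 * keep is what lets a multi-statement macro behave as a single statement.
 * Minimal sketch of why it matters (hypothetical macro, not from this
 * patch): */
#include <assert.h>

#define SWAP_INT(a, b) \
  do {                 \
    int tmp_ = (a);    \
    (a) = (b);         \
    (b) = tmp_;        \
  } while (0)

int main(void) {
  int x = 2, y = 1;
  /* Composes like an ordinary statement: the `;` after the invocation
   * terminates the do-while, so the else still binds to the if. */
  if (x > y)
    SWAP_INT(x, y);
  else
    ++x;
  assert(x == 1 && y == 2);
  return 0;
}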
@@ -111,20 +106,18 @@
// Special case handling to deal with the step from q2.0
// down to lossless mode represented by q 1.0.
- if (minqtarget <= 2.0)
- return 0;
+ if (minqtarget <= 2.0) return 0;
for (i = 0; i < QINDEX_RANGE; i++) {
- if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth))
- return i;
+ if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
}
return QINDEX_RANGE - 1;
}
-static void init_minq_luts(int *kf_low_m, int *kf_high_m,
- int *arfgf_low, int *arfgf_high,
- int *inter, int *rtc, vpx_bit_depth_t bit_depth) {
+static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
+ int *arfgf_high, int *inter, int *rtc,
+ vpx_bit_depth_t bit_depth) {
int i;
for (i = 0; i < QINDEX_RANGE; i++) {
const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
@@ -155,15 +148,12 @@
// quantizer tables easier. If necessary they can be replaced by lookup
// tables if and when things settle down in the experimental bitstream
double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) {
- // Convert the index to a real Q value (scaled down to match old Q values)
+// Convert the index to a real Q value (scaled down to match old Q values)
#if CONFIG_VPX_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
- case VPX_BITS_10:
- return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
- case VPX_BITS_12:
- return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+ case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+ case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
+ case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1.0;
@@ -174,8 +164,7 @@
}
int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor,
- vpx_bit_depth_t bit_depth) {
+ double correction_factor, vpx_bit_depth_t bit_depth) {
const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
@@ -188,10 +177,10 @@
}
int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
- double correction_factor,
- vpx_bit_depth_t bit_depth) {
- const int bpm = (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor,
- bit_depth));
+ double correction_factor,
+ vpx_bit_depth_t bit_depth) {
+ const int bpm =
+ (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
return VPXMAX(FRAME_OVERHEAD_BITS,
(int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
}
@@ -199,10 +188,9 @@
int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
const RATE_CONTROL *rc = &cpi->rc;
const VP10EncoderConfig *oxcf = &cpi->oxcf;
- const int min_frame_target = VPXMAX(rc->min_frame_bandwidth,
- rc->avg_frame_bandwidth >> 5);
- if (target < min_frame_target)
- target = min_frame_target;
+ const int min_frame_target =
+ VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
+ if (target < min_frame_target) target = min_frame_target;
if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) {
// If there is an active ARF at this location use the minimum
// bits on this frame even if it is a constructed arf.
@@ -211,11 +199,10 @@
target = min_frame_target;
}
// Clip the frame target to the maximum allowed value.
- if (target > rc->max_frame_bandwidth)
- target = rc->max_frame_bandwidth;
+ if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
if (oxcf->rc_max_inter_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_inter_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
return target;
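// Shape of the clamp above (worked example, not patch code): with
// avg_frame_bandwidth = 64000 bits, the floor is
// max(min_frame_bandwidth, 64000 >> 5) = max(min_frame_bandwidth, 2000),
// roughly 3% of an average frame; with rc_max_inter_bitrate_pct = 300 the
// ceiling becomes min(rc->max_frame_bandwidth, 3 * 64000) = min(..., 192000).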
@@ -225,12 +212,11 @@
const RATE_CONTROL *rc = &cpi->rc;
const VP10EncoderConfig *oxcf = &cpi->oxcf;
if (oxcf->rc_max_intra_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_intra_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
- if (target > rc->max_frame_bandwidth)
- target = rc->max_frame_bandwidth;
+ if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth;
return target;
}
@@ -251,8 +237,8 @@
rc->buffer_level = rc->bits_off_target;
}
-int vp10_rc_get_default_min_gf_interval(
- int width, int height, double framerate) {
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+ double framerate) {
// Assume we do not need any constraint lower than 4K 20 fps
static const double factor_safe = 3840 * 2160 * 20.0;
const double factor = width * height * framerate;
@@ -283,20 +269,20 @@
rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q;
rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q;
} else {
- rc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
- rc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q +
- oxcf->best_allowed_q) / 2;
+ rc->avg_frame_qindex[KEY_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
+ rc->avg_frame_qindex[INTER_FRAME] =
+ (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2;
}
rc->last_q[KEY_FRAME] = oxcf->best_allowed_q;
rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q;
- rc->buffer_level = rc->starting_buffer_level;
+ rc->buffer_level = rc->starting_buffer_level;
rc->bits_off_target = rc->starting_buffer_level;
- rc->rolling_target_bits = rc->avg_frame_bandwidth;
- rc->rolling_actual_bits = rc->avg_frame_bandwidth;
+ rc->rolling_target_bits = rc->avg_frame_bandwidth;
+ rc->rolling_actual_bits = rc->avg_frame_bandwidth;
rc->long_rolling_target_bits = rc->avg_frame_bandwidth;
rc->long_rolling_actual_bits = rc->avg_frame_bandwidth;
@@ -346,13 +332,11 @@
} else {
// If buffer is below drop_mark, for now just drop every other frame
// (starting with the next frame) until it increases back over drop_mark.
- int drop_mark = (int)(oxcf->drop_frames_water_mark *
- rc->optimal_buffer_level / 100);
- if ((rc->buffer_level > drop_mark) &&
- (rc->decimation_factor > 0)) {
+ int drop_mark =
+ (int)(oxcf->drop_frames_water_mark * rc->optimal_buffer_level / 100);
+ if ((rc->buffer_level > drop_mark) && (rc->decimation_factor > 0)) {
--rc->decimation_factor;
- } else if (rc->buffer_level <= drop_mark &&
- rc->decimation_factor == 0) {
+ } else if (rc->buffer_level <= drop_mark && rc->decimation_factor == 0) {
rc->decimation_factor = 1;
}
if (rc->decimation_factor > 0) {
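/* Drop-pattern sketch (illustrative): decimation_factor == 1 means
 * "code one frame, drop one frame" until buffer_level climbs back above
 * drop_mark:
 *   frame:  1     2     3     4     5
 *   action: code  drop  code  drop  code
 * The per-frame countdown that enforces this is assumed to live next to
 * decimation_factor in RATE_CONTROL. */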
@@ -379,7 +363,7 @@
rcf = rc->rate_correction_factors[KF_STD];
} else if (cpi->oxcf.pass == 2) {
RATE_FACTOR_LEVEL rf_lvl =
- cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+ cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
rcf = rc->rate_correction_factors[rf_lvl];
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -405,7 +389,7 @@
rc->rate_correction_factors[KF_STD] = factor;
} else if (cpi->oxcf.pass == 2) {
RATE_FACTOR_LEVEL rf_lvl =
- cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
+ cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index];
rc->rate_correction_factors[rf_lvl] = factor;
} else {
if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) &&
@@ -426,8 +410,7 @@
int projected_size_based_on_q = 0;
// Do not update the rate factors for arf overlay frames.
- if (cpi->rc.is_src_frame_alt_ref)
- return;
+ if (cpi->rc.is_src_frame_alt_ref) return;
// Clear down mmx registers to allow floating point in what follows
vpx_clear_system_state();
@@ -439,21 +422,19 @@
projected_size_based_on_q =
vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
} else {
- projected_size_based_on_q = vp10_estimate_bits_at_q(cpi->common.frame_type,
- cm->base_qindex,
- cm->MBs,
- rate_correction_factor,
- cm->bit_depth);
+ projected_size_based_on_q =
+ vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
+ cm->MBs, rate_correction_factor, cm->bit_depth);
}
// Work out a size correction factor.
if (projected_size_based_on_q > FRAME_OVERHEAD_BITS)
correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) /
- projected_size_based_on_q);
+ projected_size_based_on_q);
// More heavily damped adjustment used if we have been oscillating either side
// of target.
- adjustment_limit = 0.25 +
- 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
+ adjustment_limit =
+ 0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor)));
cpi->rc.q_2_frame = cpi->rc.q_1_frame;
cpi->rc.q_1_frame = cm->base_qindex;
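/* Worked example (illustrative): if the projected frame size is twice
 * the target, correction_factor == 200 and
 *   adjustment_limit = 0.25 + 0.5 * min(1, |log10(0.01 * 200)|)
 *                    = 0.25 + 0.5 * 0.301 ~= 0.40,
 * so the damped factor applied below is 100 + (200 - 100) * 0.40 = 140
 * rather than the full 200. */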
@@ -467,16 +448,16 @@
if (correction_factor > 102) {
// We are not already at the worst allowable quality
- correction_factor = (int)(100 + ((correction_factor - 100) *
- adjustment_limit));
+ correction_factor =
+ (int)(100 + ((correction_factor - 100) * adjustment_limit));
rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
// Keep rate_correction_factor within limits
if (rate_correction_factor > MAX_BPB_FACTOR)
rate_correction_factor = MAX_BPB_FACTOR;
} else if (correction_factor < 99) {
// We are not already at the best allowable quality
- correction_factor = (int)(100 - ((100 - correction_factor) *
- adjustment_limit));
+ correction_factor =
+ (int)(100 - ((100 - correction_factor) * adjustment_limit));
rate_correction_factor = (rate_correction_factor * correction_factor) / 100;
// Keep rate_correction_factor within limits
@@ -487,9 +468,8 @@
set_rate_correction_factor(cpi, rate_correction_factor);
}
-
int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
- int active_best_quality, int active_worst_quality) {
+ int active_best_quality, int active_worst_quality) {
const VP10_COMMON *const cm = &cpi->common;
int q = active_worst_quality;
int last_error = INT_MAX;
@@ -508,9 +488,8 @@
bits_per_mb_at_this_q =
(int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
} else {
- bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(cm->frame_type, i,
- correction_factor,
- cm->bit_depth);
+ bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+ cm->frame_type, i, correction_factor, cm->bit_depth);
}
if (bits_per_mb_at_this_q <= target_bits_per_mb) {
@@ -577,13 +556,13 @@
int active_worst_quality;
if (cpi->common.frame_type == KEY_FRAME) {
- active_worst_quality = curr_frame == 0 ? rc->worst_quality
- : rc->last_q[KEY_FRAME] * 2;
+ active_worst_quality =
+ curr_frame == 0 ? rc->worst_quality : rc->last_q[KEY_FRAME] * 2;
} else {
if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4
- : rc->last_q[INTER_FRAME];
+ active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4
+ : rc->last_q[INTER_FRAME];
} else {
active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 2
: rc->last_q[INTER_FRAME] * 2;
@@ -607,28 +586,27 @@
int adjustment = 0;
int active_worst_quality;
int ambient_qp;
- if (cm->frame_type == KEY_FRAME)
- return rc->worst_quality;
+ if (cm->frame_type == KEY_FRAME) return rc->worst_quality;
// For ambient_qp we use the minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME]
// for the first few frames following a key frame. These are both initialized
// to worst_quality and updated with a (3/4, 1/4) average in postencode_update.
// So for the first few frames following a key frame, the qp of that key frame
// is weighted into the active_worst_quality setting.
- ambient_qp = (cm->current_video_frame < 5) ?
- VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
- rc->avg_frame_qindex[KEY_FRAME]) :
- rc->avg_frame_qindex[INTER_FRAME];
+ ambient_qp = (cm->current_video_frame < 5)
+ ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME],
+ rc->avg_frame_qindex[KEY_FRAME])
+ : rc->avg_frame_qindex[INTER_FRAME];
active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4);
if (rc->buffer_level > rc->optimal_buffer_level) {
// Adjust down.
// Maximum limit for down adjustment, ~30%.
int max_adjustment_down = active_worst_quality / 3;
if (max_adjustment_down) {
- buff_lvl_step = ((rc->maximum_buffer_size -
- rc->optimal_buffer_level) / max_adjustment_down);
+ buff_lvl_step = ((rc->maximum_buffer_size - rc->optimal_buffer_level) /
+ max_adjustment_down);
if (buff_lvl_step)
adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) /
- buff_lvl_step);
+ buff_lvl_step);
active_worst_quality -= adjustment;
}
} else if (rc->buffer_level > critical_level) {
@@ -668,18 +646,16 @@
if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
- (last_boosted_q * 0.75),
- cm->bit_depth);
+ int delta_qindex = vp10_compute_qdelta(
+ rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (cm->current_video_frame > 0) {
// not first frame of one pass and kf_boost is set
double q_adj_factor = 1.0;
double q_val;
- active_best_quality =
- get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
- cm->bit_depth);
+ active_best_quality = get_kf_active_quality(
+ rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -689,9 +665,8 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp10_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -721,24 +696,22 @@
}
// Clip the active best and worst quality values to limits
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
*top_index = active_worst_quality;
*bottom_index = active_best_quality;
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
// Limit Q range for the adaptive loop.
- if (cm->frame_type == KEY_FRAME &&
- !rc->this_key_frame_forced &&
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
int qdelta = 0;
vpx_clear_system_state();
- qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 2.0,
- cm->bit_depth);
+ qdelta = vp10_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
*top_index = active_worst_quality + qdelta;
*top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
}
@@ -748,8 +721,8 @@
if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -758,8 +731,7 @@
q = *top_index;
}
}
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -770,8 +742,7 @@
const VP10EncoderConfig *const oxcf) {
static const double cq_adjust_threshold = 0.1;
int active_cq_level = oxcf->cq_level;
- if (oxcf->rc_mode == VPX_CQ &&
- rc->total_target_bits > 0) {
+ if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
const double x = (double)rc->total_actual_bits / rc->total_target_bits;
if (x < cq_adjust_threshold) {
active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold);
@@ -797,24 +768,21 @@
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25,
- cm->bit_depth);
+ int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else if (rc->this_key_frame_forced) {
int qindex = rc->last_boosted_qindex;
double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 0.75,
- cm->bit_depth);
+ int delta_qindex = vp10_compute_qdelta(
+ rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
// not first frame of one pass and kf_boost is set
double q_adj_factor = 1.0;
double q_val;
- active_best_quality =
- get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME],
- cm->bit_depth);
+ active_best_quality = get_kf_active_quality(
+ rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -824,9 +792,8 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp10_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -841,8 +808,7 @@
}
// For constrained quality don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
- if (q < cq_level)
- q = cq_level;
+ if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -865,12 +831,11 @@
if (oxcf->rc_mode == VPX_Q) {
int qindex = cq_level;
double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- double delta_rate[FIXED_GF_INTERVAL] =
- {0.50, 1.0, 0.85, 1.0, 0.70, 1.0, 0.85, 1.0};
- int delta_qindex =
- vp10_compute_qdelta(rc, q,
- q * delta_rate[cm->current_video_frame %
- FIXED_GF_INTERVAL], cm->bit_depth);
+ double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
+ 0.70, 1.0, 0.85, 1.0 };
+ int delta_qindex = vp10_compute_qdelta(
+ rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
+ cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
} else {
// Use the lower of active_worst_quality and recent/average Q.
@@ -880,18 +845,17 @@
active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]];
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) &&
- (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
}
// Clip the active best and worst quality values to limits
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
*top_index = active_worst_quality;
*bottom_index = active_best_quality;
@@ -902,17 +866,14 @@
vpx_clear_system_state();
// Limit Q range for the adaptive loop.
- if (cm->frame_type == KEY_FRAME &&
- !rc->this_key_frame_forced &&
+ if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
!(cm->current_video_frame == 0)) {
- qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 2.0,
- cm->bit_depth);
+ qdelta = vp10_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
- qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type,
- active_worst_quality, 1.75,
- cm->bit_depth);
+ qdelta = vp10_compute_qdelta_by_rate(
+ &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
}
*top_index = active_worst_quality + qdelta;
*top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
@@ -921,12 +882,12 @@
if (oxcf->rc_mode == VPX_Q) {
q = active_best_quality;
- // Special case code to try and match quality with forced key frames
+ // Special case code to try and match quality with forced key frames
} else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
q = rc->last_boosted_qindex;
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > *top_index) {
// Special case when we are targeting the max allowed rate
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -936,8 +897,7 @@
}
}
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
@@ -952,19 +912,19 @@
1.75, // GF_ARF_STD
2.00, // KF_STD
};
- static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] =
- {INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME};
+ static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = {
+ INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME
+ };
const VP10_COMMON *const cm = &cpi->common;
- int qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level],
- q, rate_factor_deltas[rf_level],
- cm->bit_depth);
+ int qdelta =
+ vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+ rate_factor_deltas[rf_level], cm->bit_depth);
return qdelta;
}
#define STATIC_MOTION_THRESH 95
static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
- int *bottom_index,
- int *top_index) {
+ int *bottom_index, int *top_index) {
const VP10_COMMON *const cm = &cpi->common;
const RATE_CONTROL *const rc = &cpi->rc;
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
@@ -989,17 +949,15 @@
qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
active_best_quality = qindex;
last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 1.25,
- cm->bit_depth);
+ delta_qindex = vp10_compute_qdelta(
+ rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
active_worst_quality =
VPXMIN(qindex + delta_qindex, active_worst_quality);
} else {
qindex = rc->last_boosted_qindex;
last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
- delta_qindex = vp10_compute_qdelta(rc, last_boosted_q,
- last_boosted_q * 0.75,
- cm->bit_depth);
+ delta_qindex = vp10_compute_qdelta(
+ rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
}
} else {
@@ -1007,8 +965,8 @@
double q_adj_factor = 1.0;
double q_val;
// Baseline value derived from cpi->active_worst_quality and kf boost.
- active_best_quality = get_kf_active_quality(rc, active_worst_quality,
- cm->bit_depth);
+ active_best_quality =
+ get_kf_active_quality(rc, active_worst_quality, cm->bit_depth);
// Allow somewhat lower kf minq with small image formats.
if ((cm->width * cm->height) <= (352 * 288)) {
@@ -1021,9 +979,8 @@
// Convert the adjustment factor to a qindex delta
// on active_best_quality.
q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
- active_best_quality += vp10_compute_qdelta(rc, q_val,
- q_val * q_adj_factor,
- cm->bit_depth);
+ active_best_quality +=
+ vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
}
} else if (!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1038,8 +995,7 @@
}
// For constrained quality don't allow Q less than the cq level
if (oxcf->rc_mode == VPX_CQ) {
- if (q < cq_level)
- q = cq_level;
+ if (q < cq_level) q = cq_level;
active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -1050,7 +1006,7 @@
if (!cpi->refresh_alt_ref_frame) {
active_best_quality = cq_level;
} else {
- active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
+ active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
// Modify best quality for second level arfs. For mode VPX_Q this
// becomes the baseline frame q.
@@ -1068,8 +1024,7 @@
// For the constrained quality mode we don't want
// q to fall below the cq level.
- if ((oxcf->rc_mode == VPX_CQ) &&
- (active_best_quality < cq_level)) {
+ if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) {
active_best_quality = cq_level;
}
}
@@ -1083,11 +1038,11 @@
(!rc->is_src_frame_alt_ref &&
(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) {
active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast);
active_worst_quality += (cpi->twopass.extend_maxq / 2);
} else {
active_best_quality -=
- (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
+ (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2;
active_worst_quality += cpi->twopass.extend_maxq;
}
}
@@ -1095,33 +1050,31 @@
#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY
vpx_clear_system_state();
// Q restrictions for static forced key frames are dealt with elsewhere.
- if (!(frame_is_intra_only(cm)) ||
- !rc->this_key_frame_forced ||
+ if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
(cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
- int qdelta = vp10_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index],
- active_worst_quality);
- active_worst_quality = VPXMAX(active_worst_quality + qdelta,
- active_best_quality);
+ int qdelta = vp10_frame_type_qdelta(
+ cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
+ active_worst_quality =
+ VPXMAX(active_worst_quality + qdelta, active_best_quality);
}
#endif
// Modify active_best_quality for downscaled normal frames.
if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
- int qdelta = vp10_compute_qdelta_by_rate(rc, cm->frame_type,
- active_best_quality, 2.0,
- cm->bit_depth);
+ int qdelta = vp10_compute_qdelta_by_rate(
+ rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
active_best_quality =
VPXMAX(active_best_quality + qdelta, rc->best_quality);
}
- active_best_quality = clamp(active_best_quality,
- rc->best_quality, rc->worst_quality);
- active_worst_quality = clamp(active_worst_quality,
- active_best_quality, rc->worst_quality);
+ active_best_quality =
+ clamp(active_best_quality, rc->best_quality, rc->worst_quality);
+ active_worst_quality =
+ clamp(active_worst_quality, active_best_quality, rc->worst_quality);
if (oxcf->rc_mode == VPX_Q) {
q = active_best_quality;
- // Special case code to try and match quality with forced key frames.
+ // Special case code to try and match quality with forced key frames.
} else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) {
// If static since last kf use better of last boosted and last kf q.
if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
@@ -1130,8 +1083,8 @@
q = rc->last_boosted_qindex;
}
} else {
- q = vp10_rc_regulate_q(cpi, rc->this_frame_target,
- active_best_quality, active_worst_quality);
+ q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+ active_worst_quality);
if (q > active_worst_quality) {
// Special case when we are targeting the max allowed rate.
if (rc->this_frame_target >= rc->max_frame_bandwidth)
@@ -1145,16 +1098,15 @@
*top_index = active_worst_quality;
*bottom_index = active_best_quality;
- assert(*top_index <= rc->worst_quality &&
- *top_index >= rc->best_quality);
+ assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality);
assert(*bottom_index <= rc->worst_quality &&
*bottom_index >= rc->best_quality);
assert(q <= rc->worst_quality && q >= rc->best_quality);
return q;
}
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi,
- int *bottom_index, int *top_index) {
+int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
+ int *top_index) {
int q;
if (cpi->oxcf.pass == 0) {
if (cpi->oxcf.rc_mode == VPX_CBR)
@@ -1168,20 +1120,19 @@
return q;
}
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi,
- int frame_target,
- int *frame_under_shoot_limit,
- int *frame_over_shoot_limit) {
+void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit) {
if (cpi->oxcf.rc_mode == VPX_Q) {
*frame_under_shoot_limit = 0;
- *frame_over_shoot_limit = INT_MAX;
+ *frame_over_shoot_limit = INT_MAX;
} else {
// For very small rate targets where the fractional adjustment
// may be tiny, make sure there is at least a minimum range.
const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100;
*frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0);
- *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200,
- cpi->rc.max_frame_bandwidth);
+ *frame_over_shoot_limit =
+ VPXMIN(frame_target + tolerance + 200, cpi->rc.max_frame_bandwidth);
}
}
@@ -1194,12 +1145,12 @@
// Modify frame size target when down-scaling.
if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
rc->frame_size_selector != UNSCALED)
- rc->this_frame_target = (int)(rc->this_frame_target
- * rate_thresh_mult[rc->frame_size_selector]);
+ rc->this_frame_target = (int)(rc->this_frame_target *
+ rate_thresh_mult[rc->frame_size_selector]);
// Target rate per SB64 (including partial SB64s).
- rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) /
- (cm->width * cm->height);
+ rc->sb64_target_rate =
+ ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
}
static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
@@ -1233,13 +1184,11 @@
}
// Decrement count down till next gf
- if (rc->frames_till_gf_update_due > 0)
- rc->frames_till_gf_update_due--;
+ if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
} else if (!cpi->refresh_alt_ref_frame) {
// Decrement count down till next gf
- if (rc->frames_till_gf_update_due > 0)
- rc->frames_till_gf_update_due--;
+ if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--;
rc->frames_since_golden++;
}
@@ -1271,7 +1220,7 @@
!(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
rc->last_q[INTER_FRAME] = qindex;
rc->avg_frame_qindex[INTER_FRAME] =
- ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
+ ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
rc->ni_frames++;
rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
rc->avg_q = rc->tot_q / rc->ni_frames;
@@ -1287,15 +1236,13 @@
// If all mbs in this group are skipped, only update if the Q value is
// better than that already stored.
// This is used to help set quality in forced key frames to reduce popping.
- if ((qindex < rc->last_boosted_qindex) ||
- (cm->frame_type == KEY_FRAME) ||
+ if ((qindex < rc->last_boosted_qindex) || (cm->frame_type == KEY_FRAME) ||
(!rc->constrained_gf_group &&
(cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
rc->last_boosted_qindex = qindex;
}
- if (cm->frame_type == KEY_FRAME)
- rc->last_kf_qindex = qindex;
+ if (cm->frame_type == KEY_FRAME) rc->last_kf_qindex = qindex;
update_buffer_level(cpi, rc->projected_frame_size);
@@ -1326,8 +1273,7 @@
// Update the Golden frame stats as appropriate.
update_golden_frame_stats(cpi);
- if (cm->frame_type == KEY_FRAME)
- rc->frames_since_key = 0;
+ if (cm->frame_type == KEY_FRAME) rc->frames_since_key = 0;
if (cm->show_frame) {
rc->frames_since_key++;
rc->frames_to_key--;
@@ -1351,19 +1297,20 @@
}
// Use this macro to turn on/off use of alt-refs in one-pass mode.
-#define USE_ALTREF_FOR_ONE_PASS 1
+#define USE_ALTREF_FOR_ONE_PASS 1
static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
static const int af_ratio = 10;
const RATE_CONTROL *const rc = &cpi->rc;
int target;
#if USE_ALTREF_FOR_ONE_PASS
- target = (!rc->is_src_frame_alt_ref &&
- (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ?
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
- (rc->baseline_gf_interval + af_ratio - 1) :
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
- (rc->baseline_gf_interval + af_ratio - 1);
+ target =
+ (!rc->is_src_frame_alt_ref &&
+ (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))
+ ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) /
+ (rc->baseline_gf_interval + af_ratio - 1)
+ : (rc->avg_frame_bandwidth * rc->baseline_gf_interval) /
+ (rc->baseline_gf_interval + af_ratio - 1);
#else
target = rc->avg_frame_bandwidth;
#endif
@@ -1383,13 +1330,11 @@
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
if (!cpi->refresh_alt_ref_frame &&
- (cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 ||
- (cpi->oxcf.auto_key && 0))) {
+ (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+ rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
cm->frame_type = KEY_FRAME;
- rc->this_key_frame_forced = cm->current_video_frame != 0 &&
- rc->frames_to_key == 0;
+ rc->this_key_frame_forced =
+ cm->current_video_frame != 0 && rc->frames_to_key == 0;
rc->frames_to_key = cpi->oxcf.key_freq;
rc->kf_boost = DEFAULT_KF_BOOST;
rc->source_alt_ref_active = 0;
@@ -1428,11 +1373,12 @@
if (oxcf->gf_cbr_boost_pct) {
const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100;
- target = cpi->refresh_golden_frame ?
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio_pct) /
- (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) :
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
- (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
+ target = cpi->refresh_golden_frame
+ ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval *
+ af_ratio_pct) /
+ (rc->baseline_gf_interval * 100 + af_ratio_pct - 100)
+ : (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) /
+ (rc->baseline_gf_interval * 100 + af_ratio_pct - 100);
} else {
target = rc->avg_frame_bandwidth;
}
@@ -1448,8 +1394,8 @@
target += (target * pct_high) / 200;
}
if (oxcf->rc_max_inter_bitrate_pct) {
- const int max_rate = rc->avg_frame_bandwidth *
- oxcf->rc_max_inter_bitrate_pct / 100;
+ const int max_rate =
+ rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100;
target = VPXMIN(target, max_rate);
}
return VPXMAX(min_frame_target, target);
@@ -1460,15 +1406,15 @@
int target;
if (cpi->common.current_video_frame == 0) {
target = ((rc->starting_buffer_level / 2) > INT_MAX)
- ? INT_MAX : (int)(rc->starting_buffer_level / 2);
+ ? INT_MAX
+ : (int)(rc->starting_buffer_level / 2);
} else {
int kf_boost = 32;
double framerate = cpi->framerate;
kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16));
- if (rc->frames_since_key < framerate / 2) {
- kf_boost = (int)(kf_boost * rc->frames_since_key /
- (framerate / 2));
+ if (rc->frames_since_key < framerate / 2) {
+ kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2));
}
target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
}
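/* Worked example (illustrative): at framerate = 30,
 *   kf_boost = VPXMAX(32, (int)(2 * 30 - 16)) = 44,
 * so a key frame arriving well after the previous one gets
 *   target = ((16 + 44) * avg_frame_bandwidth) >> 4 = 3.75x
 * the average frame budget; the scaling above tapers the boost for keys
 * that arrive within half a second of the last one. */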
@@ -1480,13 +1426,11 @@
RATE_CONTROL *const rc = &cpi->rc;
int target;
// TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
- if ((cm->current_video_frame == 0 ||
- (cpi->frame_flags & FRAMEFLAGS_KEY) ||
- rc->frames_to_key == 0 ||
- (cpi->oxcf.auto_key && 0))) {
+ if ((cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) ||
+ rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) {
cm->frame_type = KEY_FRAME;
- rc->this_key_frame_forced = cm->current_video_frame != 0 &&
- rc->frames_to_key == 0;
+ rc->this_key_frame_forced =
+ cm->current_video_frame != 0 && rc->frames_to_key == 0;
rc->frames_to_key = cpi->oxcf.key_freq;
rc->kf_boost = DEFAULT_KF_BOOST;
rc->source_alt_ref_active = 0;
@@ -1525,7 +1469,7 @@
}
int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
- vpx_bit_depth_t bit_depth) {
+ vpx_bit_depth_t bit_depth) {
int start_index = rc->worst_quality;
int target_index = rc->worst_quality;
int i;
@@ -1533,29 +1477,27 @@
// Convert the average q value to an index.
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
start_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart)
- break;
+ if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
}
// Convert the q target to an index
for (i = rc->best_quality; i < rc->worst_quality; ++i) {
target_index = i;
- if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget)
- break;
+ if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
}
return target_index - start_index;
}
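/* Usage sketch (editor's illustration; `rc`, `qindex` and `bit_depth`
 * assumed in scope): to aim roughly 25% below a given Q, as the forced
 * key frame paths above do, compute a qindex delta and clamp it. */
{
  const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
  const int dq = vp10_compute_qdelta(rc, q, q * 0.75, bit_depth);
  const int boosted_qindex = VPXMAX(qindex + dq, rc->best_quality);
  (void)boosted_qindex; /* dq is negative here since qtarget < qstart. */
}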
int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
- int qindex, double rate_target_ratio,
- vpx_bit_depth_t bit_depth) {
+ int qindex, double rate_target_ratio,
+ vpx_bit_depth_t bit_depth) {
int target_index = rc->worst_quality;
int i;
// Look up the current projected bits per block for the base index
- const int base_bits_per_mb = vp10_rc_bits_per_mb(frame_type, qindex, 1.0,
- bit_depth);
+ const int base_bits_per_mb =
+ vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
// Find the target bits per mb based on the base value and given ratio.
const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
@@ -1572,7 +1514,7 @@
}
void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
- RATE_CONTROL *const rc) {
+ RATE_CONTROL *const rc) {
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
// Special case code for 1 pass fixed Q mode tests
@@ -1614,8 +1556,8 @@
int vbr_max_bits;
rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate);
- rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmin_section / 100);
+ rc->min_frame_bandwidth =
+ (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100);
rc->min_frame_bandwidth =
VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS);
@@ -1627,8 +1569,9 @@
// a very high rate is given on the command line or the rate cannot
// be achieved because of a user specified max q (e.g. when the user
// specifies lossless encode).
- vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth *
- oxcf->two_pass_vbrmax_section) / 100);
+ vbr_max_bits =
+ (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) /
+ 100);
rc->max_frame_bandwidth =
VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
@@ -1655,13 +1598,13 @@
// vbr_bits_off_target > 0 means we have extra bits to spend
if (vbr_bits_off_target > 0) {
- *this_frame_target +=
- (vbr_bits_off_target > max_delta) ? max_delta
- : (int)vbr_bits_off_target;
+ *this_frame_target += (vbr_bits_off_target > max_delta)
+ ? max_delta
+ : (int)vbr_bits_off_target;
} else {
- *this_frame_target -=
- (vbr_bits_off_target < -max_delta) ? max_delta
- : (int)-vbr_bits_off_target;
+ *this_frame_target -= (vbr_bits_off_target < -max_delta)
+ ? max_delta
+ : (int)-vbr_bits_off_target;
}
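/* Illustrative numbers (assuming max_delta caps the per-frame change):
 * with vbr_bits_off_target = +15000 (cumulative undershoot so far) and
 * max_delta = 10000, this frame's target grows by 10000; a -15000
 * overshoot would symmetrically shrink it by 10000. */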
// Fast redistribution of bits arising from massive local undershoot.
@@ -1744,7 +1687,7 @@
cpi->resize_scale_num = 1;
cpi->resize_scale_den = 2;
tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) /
- (cpi->resize_scale_num * cpi->resize_scale_num);
+ (cpi->resize_scale_num * cpi->resize_scale_num);
// Reset buffer level to optimal, update target size.
rc->buffer_level = rc->optimal_buffer_level;
rc->bits_off_target = rc->optimal_buffer_level;
@@ -1754,26 +1697,22 @@
vp10_cyclic_refresh_reset_resize(cpi);
// Get the projected qindex, based on the scaled target frame size (scaled
// so target_bits_per_mb in vp10_rc_regulate_q will be the correct target).
- target_bits_per_frame = (resize_now == 1) ?
- rc->this_frame_target * tot_scale_change :
- rc->this_frame_target / tot_scale_change;
+ target_bits_per_frame = (resize_now == 1)
+ ? rc->this_frame_target * tot_scale_change
+ : rc->this_frame_target / tot_scale_change;
active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
- qindex = vp10_rc_regulate_q(cpi,
- target_bits_per_frame,
- rc->best_quality,
- active_worst_quality);
+ qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+ active_worst_quality);
// If resize is down, check if projected q index is close to worst_quality,
// and if so, reduce the rate correction factor (since we can likely afford
// a lower q for the resized frame).
- if (resize_now == 1 &&
- qindex > 90 * cpi->rc.worst_quality / 100) {
+ if (resize_now == 1 && qindex > 90 * cpi->rc.worst_quality / 100) {
rc->rate_correction_factors[INTER_NORMAL] *= 0.85;
}
// If resize is back up, check if projected q index is too far above the
// current base_qindex, and if so, reduce the rate correction factor
// (since we prefer to keep q for the resized frame close to the previous q).
- if (resize_now == -1 &&
- qindex > 130 * cm->base_qindex / 100) {
+ if (resize_now == -1 && qindex > 130 * cm->base_qindex / 100) {
rc->rate_correction_factors[INTER_NORMAL] *= 0.9;
}
}
diff --git a/vp10/encoder/ratectrl.h b/vp10/encoder/ratectrl.h
index 0b9fd45..f84df83 100644
--- a/vp10/encoder/ratectrl.h
+++ b/vp10/encoder/ratectrl.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_RATECTRL_H_
#define VP10_ENCODER_RATECTRL_H_
@@ -22,11 +21,11 @@
#endif
// Bits Per MB at different Q (Multiplied by 512)
-#define BPER_MB_NORMBITS 9
+#define BPER_MB_NORMBITS 9
-#define MIN_GF_INTERVAL 4
-#define MAX_GF_INTERVAL 16
-#define FIXED_GF_INTERVAL 8 // Used in some testing modes only
+#define MIN_GF_INTERVAL 4
+#define MAX_GF_INTERVAL 16
+#define FIXED_GF_INTERVAL 8 // Used in some testing modes only
typedef enum {
INTER_NORMAL = 0,
@@ -49,25 +48,25 @@
// e.g. 24 => 16/24 = 2/3 of native size. The restriction to 1/16th is
// intended to match the capabilities of the normative scaling filters,
// giving precedence to the up-scaling accuracy.
-static const int frame_scale_factor[FRAME_SCALE_STEPS] = {16, 24};
+static const int frame_scale_factor[FRAME_SCALE_STEPS] = { 16, 24 };
// Multiplier of the target rate to be used as threshold for triggering scaling.
-static const double rate_thresh_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rate_thresh_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
// Scale dependent Rate Correction Factor multipliers. Compensates for the
// greater number of bits per pixel generated in down-scaled frames.
-static const double rcf_mult[FRAME_SCALE_STEPS] = {1.0, 2.0};
+static const double rcf_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 };
typedef struct {
// Rate targetting variables
- int base_frame_target; // A baseline frame target before adjustment
- // for previous under or over shoot.
- int this_frame_target; // Actual frame target after rc adjustment.
+ int base_frame_target; // A baseline frame target before adjustment
+ // for previous under or over shoot.
+ int this_frame_target; // Actual frame target after rc adjustment.
int projected_frame_size;
int sb64_target_rate;
- int last_q[FRAME_TYPES]; // Separate values for Intra/Inter
- int last_boosted_qindex; // Last boosted GF/KF/ARF q
- int last_kf_qindex; // Q index of the last key frame coded.
+ int last_q[FRAME_TYPES]; // Separate values for Intra/Inter
+ int last_boosted_qindex; // Last boosted GF/KF/ARF q
+ int last_kf_qindex; // Q index of the last key frame coded.
int gfu_boost;
int last_boost;
@@ -149,17 +148,18 @@
struct VP10EncoderConfig;
void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
- RATE_CONTROL *rc);
+ RATE_CONTROL *rc);
int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
- double correction_factor,
- vpx_bit_depth_t bit_depth);
+ double correction_factor,
+ vpx_bit_depth_t bit_depth);
double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth);
void vp10_rc_init_minq_luts(void);
-int vp10_rc_get_default_min_gf_interval(int width, int height, double framerate);
+int vp10_rc_get_default_min_gf_interval(int width, int height,
+ double framerate);
// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
// be passed in to ensure that the max_gf_interval returned is at least as big
// as that.
@@ -207,28 +207,27 @@
// Computes frame size bounds.
void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
- int this_frame_target,
- int *frame_under_shoot_limit,
- int *frame_over_shoot_limit);
+ int this_frame_target,
+ int *frame_under_shoot_limit,
+ int *frame_over_shoot_limit);
// Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi,
- int *bottom_index,
- int *top_index);
+int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
+ int *top_index);
// Estimates q to achieve a target bits per frame
int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
- int active_best_quality, int active_worst_quality);
+ int active_best_quality, int active_worst_quality);
// Estimates bits per mb for a given qindex and correction factor.
int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
- double correction_factor, vpx_bit_depth_t bit_depth);
+ double correction_factor, vpx_bit_depth_t bit_depth);
// Clamping utilities for bitrate targets for iframes and pframes.
int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
- int target);
+ int target);
int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
- int target);
+ int target);
// Utility to set frame_target into the RATE_CONTROL structure
// This function is called only from the vp10_rc_get_..._params() functions.
void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
@@ -236,20 +235,20 @@
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a target q value
int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
- vpx_bit_depth_t bit_depth);
+ vpx_bit_depth_t bit_depth);
// Computes a q delta (in "q index" terms) to get from a starting q value
// to a value that should equate to the given rate ratio.
int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
- int qindex, double rate_target_ratio,
- vpx_bit_depth_t bit_depth);
+ int qindex, double rate_target_ratio,
+ vpx_bit_depth_t bit_depth);
int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
void vp10_rc_update_framerate(struct VP10_COMP *cpi);
void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
- RATE_CONTROL *const rc);
+ RATE_CONTROL *const rc);
void vp10_set_target_rate(struct VP10_COMP *cpi);
diff --git a/vp10/encoder/rd.c b/vp10/encoder/rd.c
index acc58e2..f72ebdb 100644
--- a/vp10/encoder/rd.c
+++ b/vp10/encoder/rd.c
@@ -40,8 +40,8 @@
#include "vp10/encoder/rd.h"
#include "vp10/encoder/tokenize.h"
-#define RD_THRESH_POW 1.25
-#define RD_MULT_EPB_RATIO 64
+#define RD_THRESH_POW 1.25
+#define RD_MULT_EPB_RATIO 64
// Factor to weigh the rate for switchable interp filters.
#define SWITCHABLE_INTERP_RATE_FACTOR 1
@@ -73,26 +73,25 @@
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
- vp10_intra_mode_tree);
+ vp10_intra_mode_tree);
vp10_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp10_intra_mode_tree);
for (i = 0; i < INTRA_MODES; ++i)
- vp10_cost_tokens(cpi->intra_uv_mode_cost[i],
- fc->uv_mode_prob[i], vp10_intra_mode_tree);
+ vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+ vp10_intra_mode_tree);
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
vp10_cost_tokens(cpi->switchable_interp_costs[i],
- fc->switchable_interp_prob[i], vp10_switchable_interp_tree);
+ fc->switchable_interp_prob[i],
+ vp10_switchable_interp_tree);
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
- fc->intra_ext_tx_prob[i][j],
- vp10_ext_tx_tree);
+ fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
}
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- vp10_cost_tokens(cpi->inter_tx_type_costs[i],
- fc->inter_ext_tx_prob[i],
+ vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
vp10_ext_tx_tree);
}
}
@@ -108,10 +107,9 @@
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
vpx_prob probs[ENTROPY_NODES];
vp10_model_to_full_probs(p[t][i][j][k][l], probs);
- vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs,
- vp10_coef_tree);
+ vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
- vp10_coef_tree);
+ vp10_coef_tree);
assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
c[t][i][j][k][1][l][EOB_TOKEN]);
}
@@ -152,28 +150,19 @@
#endif
}
-static const int rd_boost_factor[16] = {
- 64, 32, 32, 32, 24, 16, 12, 12,
- 8, 8, 4, 4, 2, 2, 1, 0
-};
-static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = {
- 128, 144, 128, 128, 144
-};
+static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
+ 8, 8, 4, 4, 2, 2, 1, 0 };
+static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
+ 128, 144 };
int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
#if CONFIG_VPX_HIGHBITDEPTH
int64_t rdmult = 0;
switch (cpi->common.bit_depth) {
- case VPX_BITS_8:
- rdmult = 88 * q * q / 24;
- break;
- case VPX_BITS_10:
- rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4);
- break;
- case VPX_BITS_12:
- rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8);
- break;
+ case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
+ case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break;
+ case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
@@ -189,8 +178,7 @@
rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
}
- if (rdmult < 1)
- rdmult = 1;
+ if (rdmult < 1) rdmult = 1;
return (int)rdmult;
}
@@ -198,21 +186,15 @@
double q;
#if CONFIG_VPX_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
- break;
- case VPX_BITS_10:
- q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0;
- break;
- case VPX_BITS_12:
- q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0;
- break;
+ case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+ case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+ case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
}
#else
- (void) bit_depth;
+ (void)bit_depth;
q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
#endif // CONFIG_VPX_HIGHBITDEPTH
// TODO(debargha): Adjust the function below.
@@ -250,7 +232,8 @@
for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
const int qindex =
clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
- cm->y_dc_delta_q, 0, MAXQ);
+ cm->y_dc_delta_q,
+ 0, MAXQ);
const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
@@ -261,10 +244,9 @@
if (bsize >= BLOCK_8X8) {
for (i = 0; i < MAX_MODES; ++i)
- rd->threshes[segment_id][bsize][i] =
- rd->thresh_mult[i] < thresh_max
- ? rd->thresh_mult[i] * t / 4
- : INT_MAX;
+ rd->threshes[segment_id][bsize][i] = rd->thresh_mult[i] < thresh_max
+ ? rd->thresh_mult[i] * t / 4
+ : INT_MAX;
} else {
for (i = 0; i < MAX_REFS; ++i)
rd->threshes[segment_id][bsize][i] =
@@ -291,7 +273,9 @@
x->errorperbit += (x->errorperbit == 0);
x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
- cm->frame_type != KEY_FRAME) ? 0 : 1;
+ cm->frame_type != KEY_FRAME)
+ ? 0
+ : 1;
set_block_thresholds(cm, rd);
@@ -301,20 +285,20 @@
cm->frame_type == KEY_FRAME) {
for (i = 0; i < PARTITION_CONTEXTS; ++i)
vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
- vp10_partition_tree);
+ vp10_partition_tree);
}
fill_mode_costs(cpi);
if (!frame_is_intra_only(cm)) {
- vp10_build_nmv_cost_table(x->nmvjointcost,
- cm->allow_high_precision_mv ? x->nmvcost_hp
- : x->nmvcost,
- &cm->fc->nmvc, cm->allow_high_precision_mv);
+ vp10_build_nmv_cost_table(
+ x->nmvjointcost,
+ cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
+ cm->allow_high_precision_mv);
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
- cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+ cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
}
}
@@ -332,19 +316,15 @@
// where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
// and H(x) is the binary entropy function.
static const int rate_tab_q10[] = {
- 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651,
- 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811,
- 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
- 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638,
- 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130,
- 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
- 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199,
- 1159, 1086, 1021, 963, 911, 864, 821, 781,
- 745, 680, 623, 574, 530, 490, 455, 424,
- 395, 345, 304, 269, 239, 213, 190, 171,
- 154, 126, 104, 87, 73, 61, 52, 44,
- 38, 28, 21, 16, 12, 10, 8, 6,
- 5, 3, 2, 1, 1, 1, 0, 0,
+ 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, 4553, 4389, 4255, 4142,
+ 4044, 3958, 3881, 3811, 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
+ 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, 2589, 2501, 2423, 2353,
+ 2290, 2232, 2179, 2130, 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
+ 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, 1159, 1086, 1021, 963,
+ 911, 864, 821, 781, 745, 680, 623, 574, 530, 490, 455, 424,
+ 395, 345, 304, 269, 239, 213, 190, 171, 154, 126, 104, 87,
+ 73, 61, 52, 44, 38, 28, 21, 16, 12, 10, 8, 6,
+ 5, 3, 2, 1, 1, 1, 0, 0,
};
// Normalized distortion:
// This table models the normalized distortion for a Laplacian source
@@ -354,34 +334,29 @@
// where x = qpstep / sqrt(variance).
// Note the actual distortion is Dn * variance.
static const int dist_tab_q10[] = {
- 0, 0, 1, 1, 1, 2, 2, 2,
- 3, 3, 4, 5, 5, 6, 7, 7,
- 8, 9, 11, 12, 13, 15, 16, 17,
- 18, 21, 24, 26, 29, 31, 34, 36,
- 39, 44, 49, 54, 59, 64, 69, 73,
- 78, 88, 97, 106, 115, 124, 133, 142,
- 151, 167, 184, 200, 215, 231, 245, 260,
- 274, 301, 327, 351, 375, 397, 418, 439,
- 458, 495, 528, 559, 587, 613, 637, 659,
- 680, 717, 749, 777, 801, 823, 842, 859,
- 874, 899, 919, 936, 949, 960, 969, 977,
- 983, 994, 1001, 1006, 1010, 1013, 1015, 1017,
- 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5,
+ 5, 6, 7, 7, 8, 9, 11, 12, 13, 15, 16, 17,
+ 18, 21, 24, 26, 29, 31, 34, 36, 39, 44, 49, 54,
+ 59, 64, 69, 73, 78, 88, 97, 106, 115, 124, 133, 142,
+ 151, 167, 184, 200, 215, 231, 245, 260, 274, 301, 327, 351,
+ 375, 397, 418, 439, 458, 495, 528, 559, 587, 613, 637, 659,
+ 680, 717, 749, 777, 801, 823, 842, 859, 874, 899, 919, 936,
+ 949, 960, 969, 977, 983, 994, 1001, 1006, 1010, 1013, 1015, 1017,
+ 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
};
static const int xsq_iq_q10[] = {
- 0, 4, 8, 12, 16, 20, 24, 28,
- 32, 40, 48, 56, 64, 72, 80, 88,
- 96, 112, 128, 144, 160, 176, 192, 208,
- 224, 256, 288, 320, 352, 384, 416, 448,
- 480, 544, 608, 672, 736, 800, 864, 928,
- 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888,
- 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808,
- 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648,
- 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328,
- 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688,
- 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408,
- 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848,
- 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728,
+ 0, 4, 8, 12, 16, 20, 24, 28, 32,
+ 40, 48, 56, 64, 72, 80, 88, 96, 112,
+ 128, 144, 160, 176, 192, 208, 224, 256, 288,
+ 320, 352, 384, 416, 448, 480, 544, 608, 672,
+ 736, 800, 864, 928, 992, 1120, 1248, 1376, 1504,
+ 1632, 1760, 1888, 2016, 2272, 2528, 2784, 3040, 3296,
+ 3552, 3808, 4064, 4576, 5088, 5600, 6112, 6624, 7136,
+ 7648, 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328,
+ 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688, 32736,
+ 36832, 40928, 45024, 49120, 53216, 57312, 61408, 65504, 73696,
+ 81888, 90080, 98272, 106464, 114656, 122848, 131040, 147424, 163808,
+ 180192, 196576, 212960, 229344, 245728,
};
const int tmp = (xsq_q10 >> 2) + 8;
const int k = get_msb(tmp) - 3;
@@ -394,8 +369,8 @@
}
void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
- unsigned int qstep, int *rate,
- int64_t *dist) {
+ unsigned int qstep, int *rate,
+ int64_t *dist) {
// This function models the rate and distortion for a Laplacian
// source with given variance when quantized with a uniform quantizer
// with given stepsize. The closed form expressions are in:
@@ -418,9 +393,9 @@
}
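/* Usage sketch (editor's illustration; the rdmult/rddiv fields are
 * assumed to be on the MACROBLOCK as used elsewhere in this encoder):
 * model the rate and distortion of a block of 2^n_log2 samples with
 * residual variance `var` at quantizer step `qstep`, then combine them
 * with the RDCOST macro from rd.h. */
{
  int rate;
  int64_t dist;
  vp10_model_rd_from_var_lapndz(var, n_log2, qstep, &rate, &dist);
  const int64_t cost = RDCOST(x->rdmult, x->rddiv, rate, dist);
  (void)cost;
}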
void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
- const struct macroblockd_plane *pd,
- ENTROPY_CONTEXT t_above[16],
- ENTROPY_CONTEXT t_left[16]) {
+ const struct macroblockd_plane *pd,
+ ENTROPY_CONTEXT t_above[16],
+ ENTROPY_CONTEXT t_left[16]) {
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -451,15 +426,12 @@
for (i = 0; i < num_4x4_h; i += 8)
t_left[i] = !!*(const uint64_t *)&left[i];
break;
- default:
- assert(0 && "Invalid transform size.");
- break;
+ default: assert(0 && "Invalid transform size."); break;
}
}
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x,
- uint8_t *ref_y_buffer, int ref_y_stride,
- int ref_frame, BLOCK_SIZE block_size) {
+void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
int best_index = 0;
@@ -469,9 +441,9 @@
int near_same_nearest;
uint8_t *src_y_ptr = x->plane[0].src.buf;
uint8_t *ref_y_ptr;
- const int num_mv_refs = MAX_MV_REF_CANDIDATES +
- (cpi->sf.adaptive_motion_search &&
- block_size < x->max_partition_size);
+ const int num_mv_refs =
+ MAX_MV_REF_CANDIDATES +
+ (cpi->sf.adaptive_motion_search && block_size < x->max_partition_size);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv;
@@ -479,25 +451,22 @@
pred_mv[2] = x->pred_mv[ref_frame];
assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0])));
- near_same_nearest =
- x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
- x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
+ near_same_nearest = x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
+ x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
// Get the sad for each candidate reference mv.
for (i = 0; i < num_mv_refs; ++i) {
const MV *this_mv = &pred_mv[i];
int fp_row, fp_col;
- if (i == 1 && near_same_nearest)
- continue;
+ if (i == 1 && near_same_nearest) continue;
fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3;
fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3;
max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
- if (fp_row ==0 && fp_col == 0 && zero_seen)
- continue;
- zero_seen |= (fp_row ==0 && fp_col == 0);
+ if (fp_row == 0 && fp_col == 0 && zero_seen) continue;
+ zero_seen |= (fp_row == 0 && fp_col == 0);
- ref_y_ptr =&ref_y_buffer[ref_y_stride * fp_row + fp_col];
+ ref_y_ptr = &ref_y_buffer[ref_y_stride * fp_row + fp_col];
// Find sad for current vector.
this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
ref_y_ptr, ref_y_stride);
@@ -515,11 +484,10 @@
}
void vp10_setup_pred_block(const MACROBLOCKD *xd,
- struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col,
- const struct scale_factors *scale,
- const struct scale_factors *scale_uv) {
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row,
+ int mi_col, const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
int i;
dst[0].buf = src->y_buffer;
@@ -530,33 +498,33 @@
for (i = 0; i < MAX_MB_PLANE; ++i) {
setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
- i ? scale_uv : scale,
- xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+ i ? scale_uv : scale, xd->plane[i].subsampling_x,
+ xd->plane[i].subsampling_y);
}
}
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize,
- int raster_block, int stride) {
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride) {
const int bw = b_width_log2_lookup[plane_bsize];
const int y = 4 * (raster_block >> bw);
const int x = 4 * (raster_block & ((1 << bw) - 1));
return y * stride + x;
}
-int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base) {
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base) {
const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
}
YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
- int ref_frame) {
+ int ref_frame) {
const VP10_COMMON *const cm = &cpi->common;
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
- return
- (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) ?
- &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
+ return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
+ ? &cm->buffer_pool->frame_bufs[scaled_idx].buf
+ : NULL;
}
int vp10_get_switchable_rate(const VP10_COMP *cpi,
@@ -564,7 +532,7 @@
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int ctx = vp10_get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
- cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
+ cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
}
void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
@@ -613,7 +581,7 @@
rd->thresh_mult[THR_H_PRED] += 2000;
rd->thresh_mult[THR_V_PRED] += 2000;
- rd->thresh_mult[THR_D45_PRED ] += 2500;
+ rd->thresh_mult[THR_D45_PRED] += 2500;
rd->thresh_mult[THR_D135_PRED] += 2500;
rd->thresh_mult[THR_D117_PRED] += 2500;
rd->thresh_mult[THR_D153_PRED] += 2500;
@@ -622,16 +590,17 @@
}
void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
- static const int thresh_mult[2][MAX_REFS] =
- {{2500, 2500, 2500, 4500, 4500, 2500},
- {2000, 2000, 2000, 4000, 4000, 2000}};
+ static const int thresh_mult[2][MAX_REFS] = {
+ { 2500, 2500, 2500, 4500, 4500, 2500 },
+ { 2000, 2000, 2000, 4000, 4000, 2000 }
+ };
RD_OPT *const rd = &cpi->rd;
const int idx = cpi->oxcf.mode == BEST;
memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
}
void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
- int bsize, int best_mode_index) {
+ int bsize, int best_mode_index) {
if (rd_thresh > 0) {
const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
int mode;
@@ -652,16 +621,13 @@
}
int vp10_get_intra_cost_penalty(int qindex, int qdelta,
- vpx_bit_depth_t bit_depth) {
+ vpx_bit_depth_t bit_depth) {
const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
#if CONFIG_VPX_HIGHBITDEPTH
switch (bit_depth) {
- case VPX_BITS_8:
- return 20 * q;
- case VPX_BITS_10:
- return 5 * q;
- case VPX_BITS_12:
- return ROUND_POWER_OF_TWO(5 * q, 2);
+ case VPX_BITS_8: return 20 * q;
+ case VPX_BITS_10: return 5 * q;
+ case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2);
default:
assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
return -1;
@@ -670,4 +636,3 @@
return 20 * q;
#endif // CONFIG_VPX_HIGHBITDEPTH
}
-
diff --git a/vp10/encoder/rd.h b/vp10/encoder/rd.h
index cd58bf8..f9f811c 100644
--- a/vp10/encoder/rd.h
+++ b/vp10/encoder/rd.h
@@ -22,22 +22,21 @@
extern "C" {
#endif
-#define RDDIV_BITS 7
+#define RDDIV_BITS 7
-#define RDCOST(RM, DM, R, D) \
- (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM))
-#define QIDX_SKIP_THRESH 115
+#define RDCOST(RM, DM, R, D) (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM))
+#define QIDX_SKIP_THRESH 115
-#define MV_COST_WEIGHT 108
-#define MV_COST_WEIGHT_SUB 120
+#define MV_COST_WEIGHT 108
+#define MV_COST_WEIGHT_SUB 120
#define INVALID_MV 0x80008000
#define MAX_MODES 30
-#define MAX_REFS 6
+#define MAX_REFS 6
#define RD_THRESH_MAX_FACT 64
-#define RD_THRESH_INC 1
+#define RD_THRESH_INC 1
// This enumerator type needs to be kept aligned with the mode order in
// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
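The RDCOST macro above folds a rate term R, scaled by the multiplier RM and renormalized with rounding by the >> 8, into a distortion term D shifted up by DM. A worked example with made-up numbers (assume a rate multiplier of 256 and a distortion shift equal to RDDIV_BITS):

#include <stdint.h>

#define EX_RDCOST(RM, DM, R, D) \
  (((128 + ((int64_t)(R)) * (RM)) >> 8) + ((int64_t)(D) << (DM)))

// Candidate A: rate 100, distortion 50:
//   ((128 + 100 * 256) >> 8) + (50 << 7) = 100 + 6400 = 6500
// Candidate B: rate 40, distortion 80:
//   ((128 + 40 * 256) >> 8) + (80 << 7) = 40 + 10240 = 10280
// A has the lower rd cost and would be selected.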
@@ -130,57 +129,55 @@
void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
-void vp10_initialize_me_consts(struct VP10_COMP *cpi,
- MACROBLOCK *x, int qindex);
+void vp10_initialize_me_consts(struct VP10_COMP *cpi, MACROBLOCK *x,
+ int qindex);
void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
- unsigned int qstep, int *rate,
- int64_t *dist);
+ unsigned int qstep, int *rate,
+ int64_t *dist);
int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
- const MACROBLOCKD *const xd);
+ const MACROBLOCKD *const xd);
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize,
- int raster_block, int stride);
+int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+ int stride);
-int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base);
+int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base);
YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
- int ref_frame);
+ int ref_frame);
void vp10_init_me_luts(void);
void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
- const struct macroblockd_plane *pd,
- ENTROPY_CONTEXT t_above[16],
- ENTROPY_CONTEXT t_left[16]);
+ const struct macroblockd_plane *pd,
+ ENTROPY_CONTEXT t_above[16],
+ ENTROPY_CONTEXT t_left[16]);
void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
void vp10_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
- int bsize, int best_mode_index);
+ int bsize, int best_mode_index);
static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
int thresh_fact) {
- return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
+ return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x,
- uint8_t *ref_y_buffer, int ref_y_stride,
- int ref_frame, BLOCK_SIZE block_size);
+void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+ int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
void vp10_setup_pred_block(const MACROBLOCKD *xd,
- struct buf_2d dst[MAX_MB_PLANE],
- const YV12_BUFFER_CONFIG *src,
- int mi_row, int mi_col,
- const struct scale_factors *scale,
- const struct scale_factors *scale_uv);
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src, int mi_row,
+ int mi_col, const struct scale_factors *scale,
+ const struct scale_factors *scale_uv);
int vp10_get_intra_cost_penalty(int qindex, int qdelta,
- vpx_bit_depth_t bit_depth);
+ vpx_bit_depth_t bit_depth);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 6b59161..ca71429 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -42,17 +42,17 @@
#include "vp10/encoder/rdopt.h"
#include "vp10/encoder/aq_variance.h"
-#define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \
- (1 << INTRA_FRAME))
-#define GOLDEN_FRAME_MODE_MASK ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \
- (1 << INTRA_FRAME))
-#define ALT_REF_MODE_MASK ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \
- (1 << INTRA_FRAME))
+#define LAST_FRAME_MODE_MASK \
+ ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define GOLDEN_FRAME_MODE_MASK \
+ ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
+#define ALT_REF_MODE_MASK \
+ ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
-#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
+#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
-#define MIN_EARLY_TERM_INDEX 3
-#define NEW_MV_DISCOUNT_FACTOR 8
+#define MIN_EARLY_TERM_INDEX 3
+#define NEW_MV_DISCOUNT_FACTOR 8
const double ext_tx_th = 0.99;
@@ -61,9 +61,7 @@
MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;
-typedef struct {
- MV_REFERENCE_FRAME ref_frame[2];
-} REF_DEFINITION;
+typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
struct rdcost_block_args {
MACROBLOCK *x;
@@ -82,95 +80,91 @@
#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
- {NEARESTMV, {LAST_FRAME, NONE}},
- {NEARESTMV, {ALTREF_FRAME, NONE}},
- {NEARESTMV, {GOLDEN_FRAME, NONE}},
+ { NEARESTMV, { LAST_FRAME, NONE } },
+ { NEARESTMV, { ALTREF_FRAME, NONE } },
+ { NEARESTMV, { GOLDEN_FRAME, NONE } },
- {DC_PRED, {INTRA_FRAME, NONE}},
+ { DC_PRED, { INTRA_FRAME, NONE } },
- {NEWMV, {LAST_FRAME, NONE}},
- {NEWMV, {ALTREF_FRAME, NONE}},
- {NEWMV, {GOLDEN_FRAME, NONE}},
+ { NEWMV, { LAST_FRAME, NONE } },
+ { NEWMV, { ALTREF_FRAME, NONE } },
+ { NEWMV, { GOLDEN_FRAME, NONE } },
- {NEARMV, {LAST_FRAME, NONE}},
- {NEARMV, {ALTREF_FRAME, NONE}},
- {NEARMV, {GOLDEN_FRAME, NONE}},
+ { NEARMV, { LAST_FRAME, NONE } },
+ { NEARMV, { ALTREF_FRAME, NONE } },
+ { NEARMV, { GOLDEN_FRAME, NONE } },
- {ZEROMV, {LAST_FRAME, NONE}},
- {ZEROMV, {GOLDEN_FRAME, NONE}},
- {ZEROMV, {ALTREF_FRAME, NONE}},
+ { ZEROMV, { LAST_FRAME, NONE } },
+ { ZEROMV, { GOLDEN_FRAME, NONE } },
+ { ZEROMV, { ALTREF_FRAME, NONE } },
- {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {TM_PRED, {INTRA_FRAME, NONE}},
+ { TM_PRED, { INTRA_FRAME, NONE } },
- {NEARMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEWMV, {LAST_FRAME, ALTREF_FRAME}},
- {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}},
- {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { NEARMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEWMV, { LAST_FRAME, ALTREF_FRAME } },
+ { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
+ { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {ZEROMV, {LAST_FRAME, ALTREF_FRAME}},
- {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}},
+ { ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
+ { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
- {H_PRED, {INTRA_FRAME, NONE}},
- {V_PRED, {INTRA_FRAME, NONE}},
- {D135_PRED, {INTRA_FRAME, NONE}},
- {D207_PRED, {INTRA_FRAME, NONE}},
- {D153_PRED, {INTRA_FRAME, NONE}},
- {D63_PRED, {INTRA_FRAME, NONE}},
- {D117_PRED, {INTRA_FRAME, NONE}},
- {D45_PRED, {INTRA_FRAME, NONE}},
+ { H_PRED, { INTRA_FRAME, NONE } },
+ { V_PRED, { INTRA_FRAME, NONE } },
+ { D135_PRED, { INTRA_FRAME, NONE } },
+ { D207_PRED, { INTRA_FRAME, NONE } },
+ { D153_PRED, { INTRA_FRAME, NONE } },
+ { D63_PRED, { INTRA_FRAME, NONE } },
+ { D117_PRED, { INTRA_FRAME, NONE } },
+ { D45_PRED, { INTRA_FRAME, NONE } },
};
static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
- {{LAST_FRAME, NONE}},
- {{GOLDEN_FRAME, NONE}},
- {{ALTREF_FRAME, NONE}},
- {{LAST_FRAME, ALTREF_FRAME}},
- {{GOLDEN_FRAME, ALTREF_FRAME}},
- {{INTRA_FRAME, NONE}},
+ { { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
+ { { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
+ { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
};
static INLINE int write_uniform_cost(int n, int v) {
int l = get_unsigned_bits(n), m = (1 << l) - n;
- if (l == 0)
- return 0;
+ if (l == 0) return 0;
if (v < m)
return (l - 1) * vp10_cost_bit(128, 0);
else
return l * vp10_cost_bit(128, 0);
}
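write_uniform_cost above prices a truncated binary code: with l total bits and m = (1 << l) - n, the first m symbols use l - 1 bits and the rest use l. A standalone re-derivation (not the library's get_unsigned_bits) with a worked case:

static int uniform_code_bits(int n, int v) {
  int l = 0, m;
  while ((1 << l) < n) ++l;  // l = ceil(log2(n))
  m = (1 << l) - n;          // count of short (l - 1 bit) codewords
  return v < m ? l - 1 : l;
}
// n = 6 gives l = 3, m = 2: v in {0, 1} costs 2 bits and v in {2..5}
// costs 3 bits. Multiply by vp10_cost_bit(128, 0) to match the units
// used above.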
-static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
- int m, int n, int min_plane, int max_plane) {
+static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
+ int min_plane, int max_plane) {
int i;
for (i = min_plane; i < max_plane; ++i) {
struct macroblock_plane *const p = &x->plane[i];
struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
- p->coeff = ctx->coeff_pbuf[i][m];
- p->qcoeff = ctx->qcoeff_pbuf[i][m];
+ p->coeff = ctx->coeff_pbuf[i][m];
+ p->qcoeff = ctx->qcoeff_pbuf[i][m];
pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
- p->eobs = ctx->eobs_pbuf[i][m];
+ p->eobs = ctx->eobs_pbuf[i][m];
- ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
- ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
+ ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
+ ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
- ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
+ ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
- ctx->coeff_pbuf[i][n] = p->coeff;
- ctx->qcoeff_pbuf[i][n] = p->qcoeff;
+ ctx->coeff_pbuf[i][n] = p->coeff;
+ ctx->qcoeff_pbuf[i][n] = p->qcoeff;
ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
- ctx->eobs_pbuf[i][n] = p->eobs;
+ ctx->eobs_pbuf[i][n] = p->eobs;
}
}
-static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize,
- MACROBLOCK *x, MACROBLOCKD *xd,
- int *out_rate_sum, int64_t *out_dist_sum,
- int *skip_txfm_sb, int64_t *skip_sse_sb) {
+static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+ MACROBLOCKD *xd, int *out_rate_sum,
+ int64_t *out_dist_sum, int *skip_txfm_sb,
+ int64_t *skip_sse_sb) {
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence the quantizer step is also 8 times. To get the effective quantizer
// we need to divide by 8 before sending it to the modeling function.
@@ -188,10 +182,9 @@
int64_t dist;
const int dequant_shift =
#if CONFIG_VPX_HIGHBITDEPTH
- (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
- xd->bd - 5 :
+ (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
#endif // CONFIG_VPX_HIGHBITDEPTH
- 3;
+ 3;
x->pred_sse[ref] = 0;
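Following the note above, the dequant values carry the extra x8 coefficient scaling, so the shift removes it before the values reach the modeling function. A hedged sketch of the shift selection just reformatted:

static int effective_qstep(int dequant, int bd, int is_hbd) {
  // 8-bit: coefficients are 8x an orthogonal transform, so shift by 3.
  // High bitdepth scales further with bit depth, hence bd - 5 (which
  // still equals 3 at bd == 8).
  const int dequant_shift = is_hbd ? bd - 5 : 3;
  return dequant >> dequant_shift;
}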
@@ -222,8 +215,8 @@
int block_idx = (idy << 1) + idx;
int low_err_skip = 0;
- var = cpi->fn_ptr[unit_size].vf(src, p->src.stride,
- dst, pd->dst.stride, &sse);
+ var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
+ &sse);
x->bsse[(i << 2) + block_idx] = sse;
sum_sse += sse;
@@ -243,11 +236,9 @@
}
}
- if (skip_flag && !low_err_skip)
- skip_flag = 0;
+ if (skip_flag && !low_err_skip) skip_flag = 0;
- if (i == 0)
- x->pred_sse[ref] += sse;
+ if (i == 0) x->pred_sse[ref] += sse;
}
}
@@ -268,8 +259,8 @@
dist_sum += dist;
} else {
vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
- pd->dequant[1] >> dequant_shift,
- &rate, &dist);
+ pd->dequant[1] >> dequant_shift, &rate,
+ &dist);
rate_sum += rate;
dist_sum += dist;
}
@@ -282,13 +273,13 @@
}
int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
- intptr_t block_size, int64_t *ssz) {
+ intptr_t block_size, int64_t *ssz) {
int i;
int64_t error = 0, sqcoeff = 0;
for (i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += coeff[i] * coeff[i];
}
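For reference, a self-contained sketch of what this block-error kernel computes; the tran_low_t typedef here is an assumption standing in for the library's coefficient type:

#include <stdint.h>

typedef int32_t tran_low_t;  // assumption: wide enough for either build

static int64_t block_error_sketch(const tran_low_t *coeff,
                                  const tran_low_t *dqcoeff, intptr_t n,
                                  int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  intptr_t i;
  for (i = 0; i < n; i++) {
    const int64_t diff = coeff[i] - dqcoeff[i];
    error += diff * diff;                     // reconstruction error
    sqcoeff += (int64_t)coeff[i] * coeff[i];  // energy of the source block
  }
  *ssz = sqcoeff;  // used by callers as the all-zero (skip) distortion
  return error;
}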
@@ -297,13 +288,13 @@
}
int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
- int block_size) {
+ int block_size) {
int i;
int64_t error = 0;
for (i = 0; i < block_size; i++) {
const int diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
}
return error;
@@ -311,9 +302,8 @@
#if CONFIG_VPX_HIGHBITDEPTH
int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
- const tran_low_t *dqcoeff,
- intptr_t block_size,
- int64_t *ssz, int bd) {
+ const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz, int bd) {
int i;
int64_t error = 0, sqcoeff = 0;
int shift = 2 * (bd - 8);
@@ -321,7 +311,7 @@
for (i = 0; i < block_size; i++) {
const int64_t diff = coeff[i] - dqcoeff[i];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
}
assert(error >= 0 && sqcoeff >= 0);
@@ -339,17 +329,14 @@
* 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
* were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
- { 1, 2, 3, 4, 3, 16 - 13, 0 },
- { 1, 2, 3, 4, 11, 64 - 21, 0 },
- { 1, 2, 3, 4, 11, 256 - 21, 0 },
+ { 1, 2, 3, 4, 3, 16 - 13, 0 },
+ { 1, 2, 3, 4, 11, 64 - 21, 0 },
+ { 1, 2, 3, 4, 11, 256 - 21, 0 },
{ 1, 2, 3, 4, 11, 1024 - 21, 0 },
};
-static int cost_coeffs(MACROBLOCK *x,
- int plane, int block,
- ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
- TX_SIZE tx_size,
- const int16_t *scan, const int16_t *nb,
- int use_fast_coef_costing) {
+static int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A,
+ ENTROPY_CONTEXT *L, TX_SIZE tx_size, const int16_t *scan,
+ const int16_t *nb, int use_fast_coef_costing) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const struct macroblock_plane *p = &x->plane[plane];
@@ -358,8 +345,8 @@
const int16_t *band_count = &band_counts[tx_size][1];
const int eob = p->eobs[block];
const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
- unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
- x->token_costs[tx_size][type][is_inter_block(mbmi)];
+ unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
+ x->token_costs[tx_size][type][is_inter_block(mbmi)];
uint8_t token_cache[32 * 32];
int pt = combine_entropy_contexts(*A, *L);
int c, cost;
@@ -386,7 +373,7 @@
EXTRABIT e;
vp10_get_token_extra(v, &prev_t, &e);
cost = (*token_costs)[0][pt][prev_t] +
- vp10_get_cost(prev_t, e, cat6_high_cost);
+ vp10_get_cost(prev_t, e, cat6_high_cost);
token_cache[0] = vp10_pt_energy_class[prev_t];
++token_costs;
@@ -400,11 +387,11 @@
vp10_get_token_extra(v, &t, &e);
if (use_fast_coef_costing) {
cost += (*token_costs)[!prev_t][!prev_t][t] +
- vp10_get_cost(t, e, cat6_high_cost);
+ vp10_get_cost(t, e, cat6_high_cost);
} else {
pt = get_coef_context(nb, token_cache, c);
cost += (*token_costs)[!prev_t][pt][t] +
- vp10_get_cost(t, e, cat6_high_cost);
+ vp10_get_cost(t, e, cat6_high_cost);
token_cache[rc] = vp10_pt_energy_class[t];
}
prev_t = t;
@@ -434,7 +421,7 @@
static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
int64_t *out_dist, int64_t *out_sse) {
const int ss_txfrm_size = tx_size << 1;
- MACROBLOCKD* const xd = &x->e_mbd;
+ MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblock_plane *const p = &x->plane[plane];
const struct macroblockd_plane *const pd = &xd->plane[plane];
int64_t this_sse;
@@ -444,25 +431,24 @@
#if CONFIG_VPX_HIGHBITDEPTH
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
*out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
- &this_sse, bd) >> shift;
+ &this_sse, bd) >>
+ shift;
#else
- *out_dist = vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
- &this_sse) >> shift;
+ *out_dist =
+ vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
#endif // CONFIG_VPX_HIGHBITDEPTH
*out_sse = this_sse >> shift;
}
static int rate_block(int plane, int block, int blk_row, int blk_col,
- TX_SIZE tx_size, struct rdcost_block_args* args) {
+ TX_SIZE tx_size, struct rdcost_block_args *args) {
return cost_coeffs(args->x, plane, block, args->t_above + blk_col,
- args->t_left + blk_row, tx_size,
- args->so->scan, args->so->neighbors,
- args->use_fast_coef_costing);
+ args->t_left + blk_row, tx_size, args->so->scan,
+ args->so->neighbors, args->use_fast_coef_costing);
}
static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct rdcost_block_args *args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -472,29 +458,27 @@
int64_t dist;
int64_t sse;
- if (args->exit_early)
- return;
+ if (args->exit_early) return;
if (!is_inter_block(mbmi)) {
- struct encode_b_args arg = {x, NULL, &mbmi->skip};
- vp10_encode_block_intra(plane, block, blk_row, blk_col,
- plane_bsize, tx_size, &arg);
+ struct encode_b_args arg = { x, NULL, &mbmi->skip };
+ vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
+ tx_size, &arg);
dist_block(x, plane, block, tx_size, &dist, &sse);
} else if (max_txsize_lookup[plane_bsize] == tx_size) {
if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
SKIP_TXFM_NONE) {
// full forward transform and quantization
- vp10_xform_quant(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
+ vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
dist_block(x, plane, block, tx_size, &dist, &sse);
} else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
SKIP_TXFM_AC_ONLY) {
// compute DC coefficient
- tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
+ tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
- vp10_xform_quant_dc(x, plane, block, blk_row, blk_col,
- plane_bsize, tx_size);
- sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+ vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+ tx_size);
+ sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
dist = sse;
if (x->plane[plane].eobs[block]) {
const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
@@ -503,8 +487,7 @@
#if CONFIG_VPX_HIGHBITDEPTH
dc_correct >>= ((xd->bd - 8) * 2);
#endif
- if (tx_size != TX_32X32)
- dc_correct >>= 2;
+ if (tx_size != TX_32X32) dc_correct >>= 2;
dist = VPXMAX(0, sse - dc_correct);
}
@@ -512,7 +495,7 @@
// SKIP_TXFM_AC_DC
// skip forward transform
x->plane[plane].eobs[block] = 0;
- sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
+ sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
dist = sse;
}
} else {
@@ -534,7 +517,8 @@
// TODO(jingning): temporarily enabled only for luma component
rd = VPXMIN(rd1, rd2);
if (plane == 0)
- x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] ||
+ x->zcoeff_blk[tx_size][block] =
+ !x->plane[plane].eobs[block] ||
(rd1 > rd2 && !xd->lossless[mbmi->segment_id]);
args->this_rate += rate;
@@ -550,11 +534,9 @@
args->skippable &= !x->plane[plane].eobs[block];
}
-static void txfm_rd_in_plane(MACROBLOCK *x,
- int *rate, int64_t *distortion,
- int *skippable, int64_t *sse,
- int64_t ref_best_rd, int plane,
- BLOCK_SIZE bsize, TX_SIZE tx_size,
+static void txfm_rd_in_plane(MACROBLOCK *x, int *rate, int64_t *distortion,
+ int *skippable, int64_t *sse, int64_t ref_best_rd,
+ int plane, BLOCK_SIZE bsize, TX_SIZE tx_size,
int use_fast_coef_casting) {
MACROBLOCKD *const xd = &x->e_mbd;
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -566,34 +548,31 @@
args.use_fast_coef_costing = use_fast_coef_casting;
args.skippable = 1;
- if (plane == 0)
- xd->mi[0]->mbmi.tx_size = tx_size;
+ if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
tx_type = get_tx_type(pd->plane_type, xd, 0);
args.so = get_scan(tx_size, tx_type);
- vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
- block_rd_txfm, &args);
+ vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+ &args);
if (args.exit_early) {
- *rate = INT_MAX;
+ *rate = INT_MAX;
*distortion = INT64_MAX;
- *sse = INT64_MAX;
- *skippable = 0;
+ *sse = INT64_MAX;
+ *skippable = 0;
} else {
*distortion = args.this_dist;
- *rate = args.this_rate;
- *sse = args.this_sse;
- *skippable = args.skippable;
+ *rate = args.this_rate;
+ *sse = args.this_sse;
+ *skippable = args.skippable;
}
}
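The INT_MAX / INT64_MAX assignments above act as an abort sentinel for txfm_rd_in_plane: later loops detect it with checks like "if (r == INT_MAX) continue;" and drop that transform configuration. A minimal sketch of the caller-side convention:

#include <limits.h>

// Hypothetical consumer of txfm_rd_in_plane-style outputs.
static void consume_txfm_rd(int rate, long long dist) {
  if (rate == INT_MAX) {
    return;  // rd search exited early: not a usable candidate
  }
  (void)dist;  // ...otherwise fold rate/dist into the rd decision...
}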
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion,
- int *skip, int64_t *sse,
- int64_t ref_best_rd,
- BLOCK_SIZE bs) {
+static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip, int64_t *sse,
+ int64_t ref_best_rd, BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP10_COMMON *const cm = &cpi->common;
const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
@@ -604,20 +583,17 @@
int r, s;
int64_t d, psse, this_rd, best_rd = INT64_MAX;
vpx_prob skip_prob = vp10_get_skip_prob(cm, xd);
- int s0 = vp10_cost_bit(skip_prob, 0);
- int s1 = vp10_cost_bit(skip_prob, 1);
+ int s0 = vp10_cost_bit(skip_prob, 0);
+ int s1 = vp10_cost_bit(skip_prob, 1);
const int is_inter = is_inter_block(mbmi);
mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
- if (mbmi->tx_size < TX_32X32 &&
- !xd->lossless[mbmi->segment_id]) {
+ if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id]) {
for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) {
mbmi->tx_type = tx_type;
- txfm_rd_in_plane(x, &r, &d, &s,
- &psse, ref_best_rd, 0, bs, mbmi->tx_size,
+ txfm_rd_in_plane(x, &r, &d, &s, &psse, ref_best_rd, 0, bs, mbmi->tx_size,
cpi->sf.use_fast_coef_costing);
- if (r == INT_MAX)
- continue;
+ if (r == INT_MAX) continue;
if (is_inter)
r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
else
@@ -638,41 +614,35 @@
}
}
mbmi->tx_type = best_tx_type;
- txfm_rd_in_plane(x, rate, distortion, skip,
- sse, ref_best_rd, 0, bs,
+ txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id] &&
*rate != INT_MAX) {
if (is_inter)
*rate += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
else
- *rate += cpi->intra_tx_type_costs[mbmi->tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]]
- [mbmi->tx_type];
+ *rate += cpi->intra_tx_type_costs
+ [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
+ [mbmi->tx_type];
}
}
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion,
- int *skip, int64_t *sse,
- int64_t ref_best_rd,
+static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip,
+ int64_t *sse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
mbmi->tx_size = TX_4X4;
- txfm_rd_in_plane(x, rate, distortion, skip,
- sse, ref_best_rd, 0, bs,
+ txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
mbmi->tx_size, cpi->sf.use_fast_coef_costing);
}
-static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x,
- int *rate,
- int64_t *distortion,
- int *skip,
- int64_t *psse,
- int64_t ref_best_rd,
+static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skip,
+ int64_t *psse, int64_t ref_best_rd,
BLOCK_SIZE bs) {
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP10_COMMON *const cm = &cpi->common;
@@ -707,15 +677,15 @@
}
*distortion = INT64_MAX;
- *rate = INT_MAX;
- *skip = 0;
- *psse = INT64_MAX;
+ *rate = INT_MAX;
+ *skip = 0;
+ *psse = INT64_MAX;
for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) {
last_rd = INT64_MAX;
for (n = start_tx; n >= end_tx; --n) {
int r_tx_size = 0;
- for (m = 0; m <= n - (n == (int) max_tx_size); ++m) {
+ for (m = 0; m <= n - (n == (int)max_tx_size); ++m) {
if (m == n)
r_tx_size += vp10_cost_zero(tx_probs[m]);
else
@@ -726,56 +696,50 @@
continue;
}
mbmi->tx_type = tx_type;
- txfm_rd_in_plane(x, &r, &d, &s,
- &sse, ref_best_rd, 0, bs, n,
+ txfm_rd_in_plane(x, &r, &d, &s, &sse, ref_best_rd, 0, bs, n,
cpi->sf.use_fast_coef_costing);
- if (n < TX_32X32 &&
- !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
+ if (n < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
r != INT_MAX) {
if (is_inter)
r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type];
else
- r += cpi->intra_tx_type_costs[mbmi->tx_size]
- [intra_mode_to_tx_type_context[mbmi->mode]]
- [mbmi->tx_type];
+ r += cpi->intra_tx_type_costs
+ [mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]]
+ [mbmi->tx_type];
}
- if (r == INT_MAX)
- continue;
+ if (r == INT_MAX) continue;
if (s) {
if (is_inter) {
rd = RDCOST(x->rdmult, x->rddiv, s1, sse);
} else {
- rd = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, sse);
+ rd = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, sse);
}
} else {
rd = RDCOST(x->rdmult, x->rddiv, r + s0 + r_tx_size * tx_select, d);
}
- if (tx_select && !(s && is_inter))
- r += r_tx_size;
+ if (tx_select && !(s && is_inter)) r += r_tx_size;
if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !s)
rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, sse));
// Early termination in transform size search.
if (cpi->sf.tx_size_search_breakout &&
- (rd == INT64_MAX ||
- (s == 1 && tx_type != DCT_DCT && n < start_tx) ||
- (n < (int) max_tx_size && rd > last_rd)))
+ (rd == INT64_MAX || (s == 1 && tx_type != DCT_DCT && n < start_tx) ||
+ (n < (int)max_tx_size && rd > last_rd)))
break;
last_rd = rd;
if (rd <
- (is_inter && best_tx_type == DCT_DCT ? ext_tx_th : 1) *
- best_rd) {
+ (is_inter && best_tx_type == DCT_DCT ? ext_tx_th : 1) * best_rd) {
best_tx = n;
best_rd = rd;
*distortion = d;
- *rate = r;
- *skip = s;
- *psse = sse;
+ *rate = r;
+ *skip = s;
+ *psse = sse;
best_tx_type = mbmi->tx_type;
}
}
@@ -783,17 +747,14 @@
mbmi->tx_size = best_tx;
mbmi->tx_type = best_tx_type;
- if (mbmi->tx_size >= TX_32X32)
- assert(mbmi->tx_type == DCT_DCT);
- txfm_rd_in_plane(x, &r, &d, &s,
- &sse, ref_best_rd, 0, bs, best_tx,
+ if (mbmi->tx_size >= TX_32X32) assert(mbmi->tx_type == DCT_DCT);
+ txfm_rd_in_plane(x, &r, &d, &s, &sse, ref_best_rd, 0, bs, best_tx,
cpi->sf.use_fast_coef_costing);
}
static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
- int64_t *distortion, int *skip,
- int64_t *psse, BLOCK_SIZE bs,
- int64_t ref_best_rd) {
+ int64_t *distortion, int *skip, int64_t *psse,
+ BLOCK_SIZE bs, int64_t ref_best_rd) {
MACROBLOCKD *xd = &x->e_mbd;
int64_t sse;
int64_t *ret_sse = psse ? psse : &sse;
@@ -808,39 +769,33 @@
choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
bs);
} else {
- choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse,
- ref_best_rd, bs);
+ choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
+ bs);
}
}
static int conditional_skipintra(PREDICTION_MODE mode,
PREDICTION_MODE best_intra_mode) {
- if (mode == D117_PRED &&
- best_intra_mode != V_PRED &&
+ if (mode == D117_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D135_PRED)
return 1;
- if (mode == D63_PRED &&
- best_intra_mode != V_PRED &&
+ if (mode == D63_PRED && best_intra_mode != V_PRED &&
best_intra_mode != D45_PRED)
return 1;
- if (mode == D207_PRED &&
- best_intra_mode != H_PRED &&
+ if (mode == D207_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D45_PRED)
return 1;
- if (mode == D153_PRED &&
- best_intra_mode != H_PRED &&
+ if (mode == D153_PRED && best_intra_mode != H_PRED &&
best_intra_mode != D135_PRED)
return 1;
return 0;
}
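conditional_skipintra encodes a fixed neighbor relation: an oblique intra mode is searched only when the best mode so far is one of its two adjacent directional modes. Restated as a table (assuming the PREDICTION_MODE enum is in scope):

static const struct {
  PREDICTION_MODE mode, neighbor_a, neighbor_b;
} oblique_neighbors[] = {
  { D117_PRED, V_PRED, D135_PRED },
  { D63_PRED, V_PRED, D45_PRED },
  { D207_PRED, H_PRED, D45_PRED },
  { D153_PRED, H_PRED, D135_PRED },
};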
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
- int row, int col,
- PREDICTION_MODE *best_mode,
- const int *bmode_costs,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- int *bestrate, int *bestratey,
- int64_t *bestdistortion,
+static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+ int col, PREDICTION_MODE *best_mode,
+ const int *bmode_costs, ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l, int *bestrate,
+ int *bestratey, int64_t *bestdistortion,
BLOCK_SIZE bsize, int64_t rd_thresh) {
PREDICTION_MODE mode;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -873,14 +828,12 @@
int64_t distortion = 0;
int rate = bmode_costs[mode];
- if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
// Only do the oblique modes if the best so far is
// one of the neighboring directional modes
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(mode, *best_mode))
- continue;
+ if (conditional_skipintra(mode, *best_mode)) continue;
}
memcpy(tempa, ta, sizeof(ta));
@@ -891,16 +844,14 @@
const int block = (row + idy) * 2 + (col + idx);
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
- int16_t *const src_diff = vp10_raster_block_offset_int16(BLOCK_8X8,
- block,
- p->src_diff);
+ int16_t *const src_diff =
+ vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride,
- dst, dst_stride,
- col + idx, row + idy, 0);
- vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride,
- dst, dst_stride, xd->bd);
+ vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
+ vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
+ dst_stride, xd->bd);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
const scan_order *so = get_scan(TX_4X4, tx_type);
@@ -911,9 +862,9 @@
cpi->sf.use_fast_coef_costing);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride, p->eobs[block],
- xd->bd, DCT_DCT, 1);
+ vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], xd->bd,
+ DCT_DCT, 1);
} else {
int64_t unused;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
@@ -923,14 +874,15 @@
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
so->scan, so->neighbors,
cpi->sf.use_fast_coef_costing);
- distortion += vp10_highbd_block_error(
- coeff, BLOCK_OFFSET(pd->dqcoeff, block),
- 16, &unused, xd->bd) >> 2;
+ distortion +=
+ vp10_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
+ 16, &unused, xd->bd) >>
+ 2;
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next_highbd;
- vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride, p->eobs[block],
- xd->bd, tx_type, 0);
+ vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], xd->bd,
+ tx_type, 0);
}
}
}
@@ -952,16 +904,13 @@
num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
}
- next_highbd:
- {}
+ next_highbd : {}
}
- if (best_rd >= rd_thresh)
- return best_rd;
+ if (best_rd >= rd_thresh) return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
- best_dst16 + idy * 8,
- num_4x4_blocks_wide * 4 * sizeof(uint16_t));
+ best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
}
return best_rd;
@@ -974,14 +923,12 @@
int64_t distortion = 0;
int rate = bmode_costs[mode];
- if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
// Only do the oblique modes if the best so far is
// one of the neighboring directional modes
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(mode, *best_mode))
- continue;
+ if (conditional_skipintra(mode, *best_mode)) continue;
}
memcpy(tempa, ta, sizeof(ta));
@@ -996,8 +943,8 @@
vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0]->bmi[block].as_mode = mode;
- vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride,
- dst, dst_stride, col + idx, row + idy, 0);
+ vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+ dst_stride, col + idx, row + idy, 0);
vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
@@ -1010,8 +957,8 @@
cpi->sf.use_fast_coef_costing);
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride, p->eobs[block], DCT_DCT, 1);
+ vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], DCT_DCT, 1);
} else {
int64_t unused;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
@@ -1019,14 +966,16 @@
vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
- distortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
- 16, &unused) >> 2;
+ so->scan, so->neighbors,
+ cpi->sf.use_fast_coef_costing);
+ distortion +=
+ vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
+ &unused) >>
+ 2;
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
goto next;
- vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block),
- dst, dst_stride, p->eobs[block], tx_type, 0);
+ vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+ dst_stride, p->eobs[block], tx_type, 0);
}
}
}
@@ -1046,12 +995,10 @@
memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
num_4x4_blocks_wide * 4);
}
- next:
- {}
+ next : {}
}
- if (best_rd >= rd_thresh)
- return best_rd;
+ if (best_rd >= rd_thresh) return best_rd;
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
@@ -1094,14 +1041,13 @@
const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
- bmode_costs = cpi->y_mode_costs[A][L];
+ bmode_costs = cpi->y_mode_costs[A][L];
}
this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode,
bmode_costs, t_above + idx, t_left + idy,
&r, &ry, &d, bsize, best_rd - total_rd);
- if (this_rd >= best_rd - total_rd)
- return INT64_MAX;
+ if (this_rd >= best_rd - total_rd) return INT64_MAX;
total_rd += this_rd;
cost += r;
@@ -1114,8 +1060,7 @@
for (j = 1; j < num_4x4_blocks_wide; ++j)
mic->bmi[i + j].as_mode = best_mode;
- if (total_rd >= best_rd)
- return INT64_MAX;
+ if (total_rd >= best_rd) return INT64_MAX;
}
}
@@ -1128,10 +1073,9 @@
}
// This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize,
+static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize,
int64_t best_rd) {
PREDICTION_MODE mode;
PREDICTION_MODE mode_selected = DC_PRED;
@@ -1154,24 +1098,23 @@
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
mic->mbmi.mode = mode;
- super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion,
- &s, NULL, bsize, best_rd);
+ super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
+ bsize, best_rd);
- if (this_rate_tokenonly == INT_MAX)
- continue;
+ if (this_rate_tokenonly == INT_MAX) continue;
this_rate = this_rate_tokenonly + bmode_costs[mode];
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
- mode_selected = mode;
- best_rd = this_rd;
- best_tx = mic->mbmi.tx_size;
- best_tx_type = mic->mbmi.tx_type;
- *rate = this_rate;
+ mode_selected = mode;
+ best_rd = this_rd;
+ best_tx = mic->mbmi.tx_size;
+ best_tx_type = mic->mbmi.tx_type;
+ *rate = this_rate;
*rate_tokenonly = this_rate_tokenonly;
- *distortion = this_distortion;
- *skippable = s;
+ *distortion = this_distortion;
+ *skippable = s;
}
}
@@ -1184,10 +1127,9 @@
// Return value 0: early termination triggered, no valid rd cost available;
// 1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int64_t *distortion, int *skippable,
- int64_t *sse, BLOCK_SIZE bsize,
- int64_t ref_best_rd) {
+static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int64_t *distortion, int *skippable, int64_t *sse,
+ BLOCK_SIZE bsize, int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
@@ -1196,8 +1138,7 @@
int64_t pndist = 0, pnsse = 0;
int is_cost_valid = 1;
- if (ref_best_rd < 0)
- is_cost_valid = 0;
+ if (ref_best_rd < 0) is_cost_valid = 0;
if (is_inter_block(mbmi) && is_cost_valid) {
int plane;
@@ -1211,9 +1152,8 @@
*skippable = 1;
for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
- txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse,
- ref_best_rd, plane, bsize, uv_tx_size,
- cpi->sf.use_fast_coef_costing);
+ txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd, plane,
+ bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
if (pnrate == INT_MAX) {
is_cost_valid = 0;
break;
@@ -1236,10 +1176,10 @@
}
static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
- PICK_MODE_CONTEXT *ctx,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize, TX_SIZE max_tx_size) {
+ PICK_MODE_CONTEXT *ctx, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize,
+ TX_SIZE max_tx_size) {
MACROBLOCKD *xd = &x->e_mbd;
PREDICTION_MODE mode;
PREDICTION_MODE mode_selected = DC_PRED;
@@ -1249,27 +1189,25 @@
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
- if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode)))
- continue;
+ if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
xd->mi[0]->mbmi.uv_mode = mode;
- if (!super_block_uvrd(cpi, x, &this_rate_tokenonly,
- &this_distortion, &s, &this_sse, bsize, best_rd))
+ if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
+ &this_sse, bsize, best_rd))
continue;
this_rate = this_rate_tokenonly +
- cpi->intra_uv_mode_cost[xd->mi[0]->mbmi.mode][mode];
+ cpi->intra_uv_mode_cost[xd->mi[0]->mbmi.mode][mode];
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
if (this_rd < best_rd) {
- mode_selected = mode;
- best_rd = this_rd;
- *rate = this_rate;
+ mode_selected = mode;
+ best_rd = this_rd;
+ *rate = this_rate;
*rate_tokenonly = this_rate_tokenonly;
- *distortion = this_distortion;
- *skippable = s;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
+ *distortion = this_distortion;
+ *skippable = s;
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
}
}
@@ -1277,38 +1215,36 @@
return best_rd;
}
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x,
- int *rate, int *rate_tokenonly,
- int64_t *distortion, int *skippable,
- BLOCK_SIZE bsize) {
+static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+ int *rate_tokenonly, int64_t *distortion,
+ int *skippable, BLOCK_SIZE bsize) {
int64_t unused;
x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED;
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
- super_block_uvrd(cpi, x, rate_tokenonly, distortion,
- skippable, &unused, bsize, INT64_MAX);
+ super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
+ bsize, INT64_MAX);
*rate = *rate_tokenonly +
- cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED];
+ cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED];
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
- PICK_MODE_CONTEXT *ctx,
- BLOCK_SIZE bsize, TX_SIZE max_tx_size,
- int *rate_uv, int *rate_uv_tokenonly,
- int64_t *dist_uv, int *skip_uv,
- PREDICTION_MODE *mode_uv) {
+ PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
+ TX_SIZE max_tx_size, int *rate_uv,
+ int *rate_uv_tokenonly, int64_t *dist_uv,
+ int *skip_uv, PREDICTION_MODE *mode_uv) {
// Use an estimated rd for uv_intra based on DC_PRED if the
// appropriate speed flag is set.
if (cpi->sf.use_uv_intra_rd_estimate) {
- rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv,
- skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
- // Else do a proper rd search for each possible transform size that may
- // be considered in the main rd loop.
+ rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
+ bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
+ // Else do a proper rd search for each possible transform size that may
+ // be considered in the main rd loop.
} else {
- rd_pick_intra_sbuv_mode(cpi, x, ctx,
- rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
- bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
+ skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
+ max_tx_size);
}
*mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
}
@@ -1320,8 +1256,7 @@
}
static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
- int i,
- PREDICTION_MODE mode, int_mv this_mv[2],
+ int i, PREDICTION_MODE mode, int_mv this_mv[2],
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
int_mv seg_mvs[MAX_REF_FRAMES],
int_mv *best_ref_mv[2], const int *mvjcost,
@@ -1339,11 +1274,12 @@
case NEWMV:
this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ mvjcost, mvcost, MV_COST_WEIGHT_SUB);
if (is_compound) {
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
- thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
- mvjcost, mvcost, MV_COST_WEIGHT_SUB);
+ thismvcost +=
+ vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
+ mvcost, MV_COST_WEIGHT_SUB);
}
break;
case NEARMV:
@@ -1354,16 +1290,13 @@
break;
case ZEROMV:
this_mv[0].as_int = 0;
- if (is_compound)
- this_mv[1].as_int = 0;
+ if (is_compound) this_mv[1].as_int = 0;
break;
- default:
- break;
+ default: break;
}
mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
- if (is_compound)
- mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
+ if (is_compound) mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
mic->bmi[i].as_mode = mode;
@@ -1372,19 +1305,14 @@
memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));
return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
- thismvcost;
+ thismvcost;
}
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi,
- MACROBLOCK *x,
- int64_t best_yrd,
- int i,
- int *labelyrate,
+static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+ int64_t best_yrd, int i, int *labelyrate,
int64_t *distortion, int64_t *sse,
- ENTROPY_CONTEXT *ta,
- ENTROPY_CONTEXT *tl,
- int ir, int ic,
- int mi_row, int mi_col) {
+ ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
+ int ir, int ic, int mi_row, int mi_col) {
int k;
MACROBLOCKD *xd = &x->e_mbd;
struct macroblockd_plane *const pd = &xd->plane[0];
@@ -1398,8 +1326,8 @@
const uint8_t *const src =
&p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
- uint8_t *const dst = &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i,
- pd->dst.stride)];
+ uint8_t *const dst =
+ &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
int64_t thisdistortion = 0, thissse = 0;
int thisrate = 0;
TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
@@ -1421,12 +1349,13 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
vpx_highbd_subtract_block(
- height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
- 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
+ height, width,
+ vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
+ p->src.stride, dst, pd->dst.stride, xd->bd);
} else {
- vpx_subtract_block(
- height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
- 8, src, p->src.stride, dst, pd->dst.stride);
+ vpx_subtract_block(height, width, vp10_raster_block_offset_int16(
+ BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride);
}
#else
vpx_subtract_block(height, width,
@@ -1438,7 +1367,7 @@
for (idy = 0; idy < height / 4; ++idy) {
for (idx = 0; idx < width / 4; ++idx) {
int64_t ssz, rd, rd1, rd2;
- tran_low_t* coeff;
+ tran_low_t *coeff;
k += (idy * 2 + idx);
coeff = BLOCK_OFFSET(p->coeff, k);
@@ -1447,26 +1376,24 @@
vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- thisdistortion += vp10_highbd_block_error(coeff,
- BLOCK_OFFSET(pd->dqcoeff, k),
- 16, &ssz, xd->bd);
+ thisdistortion += vp10_highbd_block_error(
+ coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, xd->bd);
} else {
- thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
- 16, &ssz);
+ thisdistortion +=
+ vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
}
#else
- thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k),
- 16, &ssz);
+ thisdistortion +=
+ vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
#endif // CONFIG_VPX_HIGHBITDEPTH
thissse += ssz;
- thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4,
- so->scan, so->neighbors,
- cpi->sf.use_fast_coef_costing);
+ thisrate +=
+ cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, so->scan,
+ so->neighbors, cpi->sf.use_fast_coef_costing);
rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
rd = VPXMIN(rd1, rd2);
- if (rd >= best_yrd)
- return INT64_MAX;
+ if (rd >= best_yrd) return INT64_MAX;
}
}
@@ -1504,10 +1431,8 @@
} BEST_SEG_INFO;
static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) {
- return (mv->row >> 3) < x->mv_row_min ||
- (mv->row >> 3) > x->mv_row_max ||
- (mv->col >> 3) < x->mv_col_min ||
- (mv->col >> 3) > x->mv_col_max;
+ return (mv->row >> 3) < x->mv_row_min || (mv->row >> 3) > x->mv_row_max ||
+ (mv->col >> 3) < x->mv_col_min || (mv->col >> 3) > x->mv_col_max;
}
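Motion vectors are stored in eighth-pel units, so mv_check_bounds compares their full-pel value (>> 3) against the precomputed search-range limits. A standalone sketch of the per-component test (the inverse of the out-of-bounds check above):

static int mv_component_in_range(int v_eighth_pel, int vmin, int vmax) {
  const int v = v_eighth_pel >> 3;  // eighth-pel -> full-pel
  return v >= vmin && v <= vmax;
}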
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
@@ -1515,14 +1440,16 @@
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
- p->src.buf = &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i,
- p->src.stride)];
+ p->src.buf =
+ &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
- pd->pre[0].buf = &pd->pre[0].buf[vp10_raster_block_offset(BLOCK_8X8, i,
- pd->pre[0].stride)];
+ pd->pre[0].buf =
+ &pd->pre[0]
+ .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
if (has_second_ref(mbmi))
- pd->pre[1].buf = &pd->pre[1].buf[vp10_raster_block_offset(BLOCK_8X8, i,
- pd->pre[1].stride)];
+ pd->pre[1].buf =
+ &pd->pre[1]
+ .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
}
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -1530,8 +1457,7 @@
MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi;
x->plane[0].src = orig_src;
x->e_mbd.plane[0].pre[0] = orig_pre[0];
- if (has_second_ref(mbmi))
- x->e_mbd.plane[0].pre[1] = orig_pre[1];
+ if (has_second_ref(mbmi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
}
static INLINE int mv_has_subpel(const MV *mv) {
@@ -1540,10 +1466,11 @@
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
// TODO(aconverse): Find out if this is still productive then clean up or remove
-static int check_best_zero_mv(
- const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES],
- int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode,
- const MV_REFERENCE_FRAME ref_frames[2]) {
+static int check_best_zero_mv(const VP10_COMP *cpi,
+ const uint8_t mode_context[MAX_REF_FRAMES],
+ int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
+ int this_mode,
+ const MV_REFERENCE_FRAME ref_frames[2]) {
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
(ref_frames[1] == NONE ||
@@ -1575,10 +1502,8 @@
return 1;
}
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int_mv *frame_mv,
- int mi_row, int mi_col,
+static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+ int_mv *frame_mv, int mi_row, int mi_col,
int_mv single_newmv[MAX_REF_FRAMES],
int *rate_mv) {
const VP10_COMMON *const cm = &cpi->common;
@@ -1586,8 +1511,8 @@
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- const int refs[2] = {mbmi->ref_frame[0],
- mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]};
+ const int refs[2] = { mbmi->ref_frame[0],
+ mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
int_mv ref_mv[2];
int ite, ref;
const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
@@ -1595,13 +1520,13 @@
// Do joint motion search in compound mode to get more accurate mv.
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
- int last_besterr[2] = {INT_MAX, INT_MAX};
+ int last_besterr[2] = { INT_MAX, INT_MAX };
const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
};
- // Prediction buffer from second frame.
+// Prediction buffer from second frame.
#if CONFIG_VPX_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
uint8_t *second_pred;
@@ -1620,21 +1545,20 @@
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[ref][i] = xd->plane[i].pre[ref];
vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
- NULL);
+ NULL);
}
frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
}
- // Since we have scaled the reference frames to match the size of the current
- // frame we must use a unit scaling factor during mode selection.
+// Since we have scaled the reference frames to match the size of the current
+// frame we must use a unit scaling factor during mode selection.
#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
- cm->width, cm->height,
- cm->use_highbitdepth);
+ vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height, cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height,
- cm->width, cm->height);
+ vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+ cm->height);
#endif // CONFIG_VPX_HIGHBITDEPTH
// Allow joint search multiple times iteratively for each reference frame
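The loop that follows alternates between the two references: refine one MV while the prediction from the other is held fixed, and stop once a pass fails to improve. A hedged sketch of that control flow, where refine() is a hypothetical stand-in for the full-pel plus sub-pel search on one reference:

#include <limits.h>

static void joint_search_sketch(int (*refine)(int id)) {
  int last_besterr[2] = { INT_MAX, INT_MAX };
  int ite;
  for (ite = 0; ite < 4; ++ite) {
    const int id = ite & 1;      // which reference to refine this pass
    const int err = refine(id);  // search with the other ref held fixed
    if (err < last_besterr[id])
      last_besterr[id] = err;    // improved: keep iterating
    else
      break;                     // no gain on this reference: stop
  }
}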
@@ -1658,41 +1582,30 @@
ref_yv12[0] = xd->plane[0].pre[0];
ref_yv12[1] = xd->plane[0].pre[1];
- // Get the prediction block from the 'other' reference frame.
+// Get the prediction block from the 'other' reference frame.
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
- vp10_highbd_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE,
- xd->bd);
+ vp10_highbd_build_inter_predictor(
+ ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
+ &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
} else {
second_pred = (uint8_t *)second_pred_alloc_16;
- vp10_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE);
+ vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv,
+ &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE);
}
#else
- vp10_build_inter_predictor(ref_yv12[!id].buf,
- ref_yv12[!id].stride,
- second_pred, pw,
- &frame_mv[refs[!id]].as_mv,
- &sf, pw, ph, 0,
- kernel, MV_PRECISION_Q3,
- mi_col * MI_SIZE, mi_row * MI_SIZE);
+ vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+ second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
+ pw, ph, 0, kernel, MV_PRECISION_Q3,
+ mi_col * MI_SIZE, mi_row * MI_SIZE);
#endif // CONFIG_VPX_HIGHBITDEPTH
// Do compound motion search on the current reference frame.
- if (id)
- xd->plane[0].pre[0] = ref_yv12[id];
+ if (id) xd->plane[0].pre[0] = ref_yv12[id];
vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
// Use the mv result from the single mode as mv predictor.
@@ -1702,13 +1615,12 @@
tmp_mv.row >>= 3;
// Small-range full-pixel motion search.
- bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb,
- search_range,
- &cpi->fn_ptr[bsize],
- &ref_mv[id].as_mv, second_pred);
+ bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
+ &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
+ second_pred);
if (bestsme < INT_MAX)
bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
- second_pred, &cpi->fn_ptr[bsize], 1);
+ second_pred, &cpi->fn_ptr[bsize], 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -1719,21 +1631,14 @@
int dis; /* TODO: use dis in distortion calculation later. */
unsigned int sse;
bestsme = cpi->find_fractional_mv_step(
- x, &tmp_mv,
- &ref_mv[id].as_mv,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- 0, cpi->sf.mv.subpel_iters_per_step,
- NULL,
- x->nmvjointcost, x->mvcost,
- &dis, &sse, second_pred,
- pw, ph);
+ x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[bsize], 0,
+ cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
+ &dis, &sse, second_pred, pw, ph);
}
// Restore the pointer to the first (possibly scaled) prediction buffer.
- if (id)
- xd->plane[0].pre[0] = ref_yv12[0];
+ if (id) xd->plane[0].pre[0] = ref_yv12[0];
if (bestsme < last_besterr[id]) {
frame_mv[refs[id]].as_mv = tmp_mv;
@@ -1754,22 +1659,17 @@
}
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
}
-static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv,
- int_mv *second_best_ref_mv,
- int64_t best_rd, int *returntotrate,
- int *returnyrate,
- int64_t *returndistortion,
- int *skippable, int64_t *psse,
- int mvthresh,
- int_mv seg_mvs[4][MAX_REF_FRAMES],
- BEST_SEG_INFO *bsi_buf, int filter_idx,
- int mi_row, int mi_col) {
+static int64_t rd_pick_best_sub8x8_mode(
+ VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
+ int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
+ int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
+ int filter_idx, int mi_row, int mi_col) {
int i;
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
MACROBLOCKD *xd = &x->e_mbd;
@@ -1803,8 +1703,7 @@
bsi->mvp.as_int = best_ref_mv->as_int;
bsi->mvthresh = mvthresh;
- for (i = 0; i < 4; i++)
- bsi->modes[i] = ZEROMV;
+ for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
memcpy(t_above, pd->above_context, sizeof(t_above));
memcpy(t_left, pd->left_context, sizeof(t_left));
@@ -1830,10 +1729,9 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
frame_mv[ZEROMV][frame].as_int = 0;
- vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
- &frame_mv[NEARESTMV][frame],
- &frame_mv[NEARMV][frame],
- mbmi_ext->mode_context);
+ vp10_append_sub8x8_mvs_for_idx(
+ cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
+ &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
}
// search for the best motion vector on this segment
@@ -1843,8 +1741,7 @@
mode_idx = INTER_OFFSET(this_mode);
bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
- if (!(inter_mode_mask & (1 << this_mode)))
- continue;
+ if (!(inter_mode_mask & (1 << this_mode))) continue;
if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
this_mode, mbmi->ref_frame))
@@ -1869,15 +1766,13 @@
/* Is the best so far sufficiently good that we can't justify doing
* a new motion search. */
- if (best_rd < label_mv_thresh)
- break;
+ if (best_rd < label_mv_thresh) break;
if (cpi->oxcf.mode != BEST) {
// use previous block's result as next block's MV predictor.
if (i > 0) {
bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
- if (i == 2)
- bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+ if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
}
}
if (i == 0)
@@ -1890,8 +1785,8 @@
// Take wtd average of the step_params based on the last frame's
// max mv magnitude and the best ref mvs of the current block for
// the given reference.
- step_param = (vp10_init_search_range(max_mv) +
- cpi->mv_step_param) / 2;
+ step_param =
+ (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
} else {
step_param = cpi->mv_step_param;
}
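
The "wtd average" comment above is easiest to see with numbers. The sketch below is a minimal model of that blend; it assumes only that vp10_init_search_range() maps an expected motion-vector magnitude to a search step parameter. init_search_range() and the constants inside it are hypothetical stand-ins for illustration, not the library's code.

/* Minimal sketch of the step_param blend above. init_search_range() is
 * a hypothetical stand-in for vp10_init_search_range(): larger expected
 * motion yields a smaller step parameter (i.e. a finer, longer search). */
#include <stdio.h>

static int init_search_range(int max_mv_magnitude) {
  int sr = 0;
  while ((max_mv_magnitude << sr) < 1023 && sr < 9) ++sr;
  return sr;
}

int main(void) {
  const int frame_step_param = 6; /* per-frame default, assumed */
  const int max_mv = 32;          /* best ref-mv magnitude for this block */
  /* Equal-weight average of the two estimates, mirroring
   * (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2. */
  const int step_param = (init_search_range(max_mv) + frame_step_param) / 2;
  printf("step_param = %d\n", step_param);
  return 0;
}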
@@ -1913,24 +1808,16 @@
bestsme = vp10_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
- &bsi->ref_mv[0]->as_mv, new_mv,
- INT_MAX, 1);
+ &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
if (bestsme < INT_MAX) {
int distortion;
cpi->find_fractional_mv_step(
- x,
- new_mv,
- &bsi->ref_mv[0]->as_mv,
- cm->allow_high_precision_mv,
+ x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &distortion,
- &x->pred_sse[mbmi->ref_frame[0]],
- NULL, 0, 0);
+ cpi->sf.mv.subpel_force_stop, cpi->sf.mv.subpel_iters_per_step,
+ cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
+ &distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, 0, 0);
// save motion search result for use in compound prediction
seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
@@ -1955,9 +1842,8 @@
mi_buf_shift(x, i);
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
int rate_mv;
- joint_motion_search(cpi, x, bsize, frame_mv[this_mode],
- mi_row, mi_col, seg_mvs[i],
- &rate_mv);
+ joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
+ mi_col, seg_mvs[i], &rate_mv);
seg_mvs[i][mbmi->ref_frame[0]].as_int =
frame_mv[this_mode][mbmi->ref_frame[0]].as_int;
seg_mvs[i][mbmi->ref_frame[1]].as_int =
@@ -1967,10 +1853,9 @@
mi_buf_restore(x, orig_src, orig_pre);
}
- bsi->rdstat[i][mode_idx].brate =
- set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
- frame_mv, seg_mvs[i], bsi->ref_mv,
- x->nmvjointcost, x->mvcost);
+ bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
+ cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
+ bsi->ref_mv, x->nmvjointcost, x->mvcost);
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
bsi->rdstat[i][mode_idx].mvs[ref].as_int =
@@ -1985,8 +1870,7 @@
// Trap vectors that reach beyond the UMV borders
if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) ||
- (has_second_rf &&
- mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
+ (has_second_rf && mv_check_bounds(x, &mode_mv[this_mode][1].as_mv)))
continue;
if (filter_idx > 0) {
@@ -1997,7 +1881,7 @@
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
have_ref &= mode_mv[this_mode][ref].as_int ==
- ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
if (filter_idx > 1 && !subpelmv && !have_ref) {
@@ -2005,7 +1889,7 @@
have_ref = 1;
for (ref = 0; ref < 1 + has_second_rf; ++ref)
have_ref &= mode_mv[this_mode][ref].as_int ==
- ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
+ ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
}
if (!subpelmv && have_ref &&
@@ -2027,19 +1911,14 @@
}
}
- bsi->rdstat[i][mode_idx].brdcost =
- encode_inter_mb_segment(cpi, x,
- bsi->segment_rd - this_segment_rd, i,
- &bsi->rdstat[i][mode_idx].byrate,
- &bsi->rdstat[i][mode_idx].bdist,
- &bsi->rdstat[i][mode_idx].bsse,
- bsi->rdstat[i][mode_idx].ta,
- bsi->rdstat[i][mode_idx].tl,
- idy, idx,
- mi_row, mi_col);
+ bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
+ cpi, x, bsi->segment_rd - this_segment_rd, i,
+ &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
+ &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
+ bsi->rdstat[i][mode_idx].tl, idy, idx, mi_row, mi_col);
if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
- bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv,
- bsi->rdstat[i][mode_idx].brate, 0);
+ bsi->rdstat[i][mode_idx].brdcost +=
+ RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
if (num_4x4_blocks_wide > 1)
@@ -2095,11 +1974,9 @@
bsi->sse = block_sse;
// update the coding decisions
- for (k = 0; k < 4; ++k)
- bsi->modes[k] = mi->bmi[k].as_mode;
+ for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
- if (bsi->segment_rd > best_rd)
- return INT64_MAX;
+ if (bsi->segment_rd > best_rd) return INT64_MAX;
/* set it to the best */
for (i = 0; i < 4; i++) {
mode_idx = INTER_OFFSET(bsi->modes[i]);
@@ -2124,16 +2001,15 @@
}
static void estimate_ref_frame_costs(const VP10_COMMON *cm,
- const MACROBLOCKD *xd,
- int segment_id,
+ const MACROBLOCKD *xd, int segment_id,
unsigned int *ref_costs_single,
unsigned int *ref_costs_comp,
vpx_prob *comp_mode_p) {
- int seg_ref_active = segfeature_active(&cm->seg, segment_id,
- SEG_LVL_REF_FRAME);
+ int seg_ref_active =
+ segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
if (seg_ref_active) {
memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
- memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
+ memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
*comp_mode_p = 128;
} else {
vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
@@ -2158,13 +2034,13 @@
ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
ref_costs_single[ALTREF_FRAME] = base_cost;
- ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
+ ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
} else {
- ref_costs_single[LAST_FRAME] = 512;
+ ref_costs_single[LAST_FRAME] = 512;
ref_costs_single[GOLDEN_FRAME] = 512;
ref_costs_single[ALTREF_FRAME] = 512;
}
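
The single-reference costs above form a small binary tree: one bit (ref_single_p1) separates LAST from {GOLDEN, ALTREF}, and a second bit (ref_single_p2) separates GOLDEN from ALTREF. Below is a minimal sketch of that accumulation; cost_bit() is a hypothetical stand-in for the table-driven vp10_cost_bit(), the probabilities are made up, and the shared base_cost the real code adds first is omitted for brevity.

/* Sketch of the single-reference cost tree (hypothetical cost model). */
#include <stdio.h>
#include <math.h>

/* Hypothetical: cost of coding `bit` when a zero has probability p/256,
 * expressed in 1/256-bit units. The real vp10_cost_bit() is table-driven. */
static int cost_bit(int p, int bit) {
  const double prob = bit ? (256 - p) / 256.0 : p / 256.0;
  return (int)lround(-log2(prob) * 256.0);
}

int main(void) {
  const int ref_single_p1 = 200, ref_single_p2 = 128; /* made-up probs */
  int cost_last = 0, cost_golden = 0, cost_altref = 0;
  cost_last += cost_bit(ref_single_p1, 0);   /* LAST: one decision */
  cost_golden += cost_bit(ref_single_p1, 1); /* not LAST... */
  cost_altref += cost_bit(ref_single_p1, 1);
  cost_golden += cost_bit(ref_single_p2, 0); /* ...then GOLDEN */
  cost_altref += cost_bit(ref_single_p2, 1); /* ...or ALTREF */
  printf("LAST=%d GOLDEN=%d ALTREF=%d\n", cost_last, cost_golden, cost_altref);
  return 0;
}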
@@ -2175,20 +2051,19 @@
if (cm->reference_mode == REFERENCE_MODE_SELECT)
base_cost += vp10_cost_bit(comp_inter_p, 1);
- ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
+ ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
} else {
- ref_costs_comp[LAST_FRAME] = 512;
+ ref_costs_comp[LAST_FRAME] = 512;
ref_costs_comp[GOLDEN_FRAME] = 512;
}
}
}
-static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
- int mode_index,
- int64_t comp_pred_diff[REFERENCE_MODES],
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS],
- int skippable) {
+static void store_coding_context(
+ MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
+ int64_t comp_pred_diff[REFERENCE_MODES],
+ int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
MACROBLOCKD *const xd = &x->e_mbd;
// Take a snapshot of the coding context so it can be
@@ -2199,7 +2074,7 @@
ctx->mic = *xd->mi[0];
ctx->mbmi_ext = *x->mbmi_ext;
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
- ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
+ ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
memcpy(ctx->best_filter_diff, best_filter_diff,
@@ -2208,8 +2083,7 @@
static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
- BLOCK_SIZE block_size,
- int mi_row, int mi_col,
+ BLOCK_SIZE block_size, int mi_row, int mi_col,
int_mv frame_nearest_mv[MAX_REF_FRAMES],
int_mv frame_near_mv[MAX_REF_FRAMES],
struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
@@ -2228,8 +2102,8 @@
vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
- NULL, NULL, mbmi_ext->mode_context);
+ vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
+ NULL, mbmi_ext->mode_context);
// Candidate refinement carried out at encoder and decoder
vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
@@ -2240,18 +2114,17 @@
// in full and choose the best as the centre point for subsequent searches.
// The current implementation doesn't support scaling.
if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
- vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
- ref_frame, block_size);
+ vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+ block_size);
}
static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const VP10_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
- struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
+ struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
int bestsme = INT_MAX;
int step_param;
int sadpb = x->sadperbit16;
@@ -2265,8 +2138,8 @@
int tmp_row_max = x->mv_row_max;
int cost_list[5];
- const YV12_BUFFER_CONFIG *scaled_ref_frame = vp10_get_scaled_ref_frame(cpi,
- ref);
+ const YV12_BUFFER_CONFIG *scaled_ref_frame =
+ vp10_get_scaled_ref_frame(cpi, ref);
MV pred_mv[3];
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2278,8 +2151,7 @@
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
- for (i = 0; i < MAX_MB_PLANE; i++)
- backup_yv12[i] = xd->plane[i].pre[0];
+ for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
}
@@ -2292,8 +2164,9 @@
// Take wtd average of the step_params based on the last frame's
// max mv magnitude and that based on the best ref mvs of the current
// block for the given reference.
- step_param = (vp10_init_search_range(x->max_mv_context[ref]) +
- cpi->mv_step_param) / 2;
+ step_param =
+ (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+ 2;
} else {
step_param = cpi->mv_step_param;
}
@@ -2310,8 +2183,7 @@
int bhl = b_height_log2_lookup[bsize];
int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
- if (tlevel < 5)
- step_param += 2;
+ if (tlevel < 5) step_param += 2;
// prev_mv_sad is not set up for dynamically scaled frames.
if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
@@ -2339,8 +2211,8 @@
mvp_full.row >>= 3;
bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
- cond_cost_list(cpi, cost_list),
- &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
+ cond_cost_list(cpi, cost_list), &ref_mv,
+ &tmp_mv->as_mv, INT_MAX, 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -2348,32 +2220,24 @@
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX) {
- int dis; /* TODO: use dis in distortion calculation later. */
- cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
- cm->allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[bsize],
- cpi->sf.mv.subpel_force_stop,
- cpi->sf.mv.subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- x->nmvjointcost, x->mvcost,
- &dis, &x->pred_sse[ref], NULL, 0, 0);
+ int dis; /* TODO: use dis in distortion calculation later. */
+ cpi->find_fractional_mv_step(
+ x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
+ &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
}
- *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+ x->mvcost, MV_COST_WEIGHT);
- if (cpi->sf.adaptive_motion_search)
- x->pred_mv[ref] = tmp_mv->as_mv;
+ if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
if (scaled_ref_frame) {
int i;
- for (i = 0; i < MAX_MB_PLANE; i++)
- xd->plane[i].pre[0] = backup_yv12[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
}
}
-
-
static INLINE void restore_dst_buf(MACROBLOCKD *xd,
uint8_t *orig_dst[MAX_MB_PLANE],
int orig_dst_stride[MAX_MB_PLANE]) {
@@ -2391,13 +2255,11 @@
// However, once established that vector may be usable through the nearest and
// near mv modes to reduce distortion in subsequent blocks and also improve
// visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi,
- int this_mode,
+static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
int_mv this_mv,
int_mv (*mode_mv)[MAX_REF_FRAMES],
int ref_frame) {
- return (!cpi->rc.is_src_frame_alt_ref &&
- (this_mode == NEWMV) &&
+ return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
(this_mv.as_int != 0) &&
((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
(mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
@@ -2406,32 +2268,25 @@
}
#define LEFT_TOP_MARGIN ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
-#define RIGHT_BOTTOM_MARGIN ((VPX_ENC_BORDER_IN_PIXELS -\
- VPX_INTERP_EXTEND) << 3)
+#define RIGHT_BOTTOM_MARGIN \
+ ((VPX_ENC_BORDER_IN_PIXELS - VPX_INTERP_EXTEND) << 3)
// TODO(jingning): this mv clamping function should be block size dependent.
static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN,
- xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
- xd->mb_to_top_edge - LEFT_TOP_MARGIN,
- xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
+ xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN,
+ xd->mb_to_top_edge - LEFT_TOP_MARGIN,
+ xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
}
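
For a concrete feel of the clamp: the edge deltas and the margins are in 1/8-pel units, hence the << 3 in the macros. Assuming the usual libvpx values VPX_ENC_BORDER_IN_PIXELS == 160 and VPX_INTERP_EXTEND == 4 (an assumption — those definitions are not in this patch), the margin works out as below.

/* Worked example of the margin arithmetic above; the two constants are
 * assumed values, check them against the tree you build. */
#include <stdio.h>

#define ENC_BORDER_IN_PIXELS 160 /* assumed VPX_ENC_BORDER_IN_PIXELS */
#define INTERP_EXTEND 4          /* assumed VPX_INTERP_EXTEND */
#define MARGIN ((ENC_BORDER_IN_PIXELS - INTERP_EXTEND) << 3)

int main(void) {
  /* MVs are clamped so the interpolation filter's support never reads
   * outside the padded reconstruction border. */
  printf("margin = %d eighth-pels (%d full pels)\n", MARGIN, MARGIN >> 3);
  return 0;
}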
-static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int *rate2, int64_t *distortion,
- int *skippable,
- int *rate_y, int *rate_uv,
- int *disable_skip,
- int_mv (*mode_mv)[MAX_REF_FRAMES],
- int mi_row, int mi_col,
- int_mv single_newmv[MAX_REF_FRAMES],
- INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
- int (*single_skippable)[MAX_REF_FRAMES],
- int64_t *psse,
- const int64_t ref_best_rd,
- int64_t *mask_filter,
- int64_t filter_cache[]) {
+static int64_t handle_inter_mode(
+ VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+ int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
+ int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
+ int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
+ INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
+ int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
+ const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
VP10_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -2441,7 +2296,7 @@
int_mv *frame_mv = mode_mv[this_mode];
int i;
int refs[2] = { mbmi->ref_frame[0],
- (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
+ (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv cur_mv[2];
#if CONFIG_VPX_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
@@ -2457,13 +2312,16 @@
int orig_dst_stride[MAX_MB_PLANE];
int rs = 0;
INTERP_FILTER best_filter = SWITCHABLE;
- uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
- int64_t bsse[MAX_MB_PLANE << 2] = {0};
+ uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
+ int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
int bsl = mi_width_log2_lookup[bsize];
- int pred_filter_search = cpi->sf.cb_pred_filter_search ?
- (((mi_row + mi_col) >> bsl) +
- get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
+ int pred_filter_search =
+ cpi->sf.cb_pred_filter_search
+ ? (((mi_row + mi_col) >> bsl) +
+ get_chessboard_index(cm->current_video_frame)) &
+ 0x1
+ : 0;
int skip_txfm_sb = 0;
int64_t skip_sse_sb = INT64_MAX;
@@ -2479,13 +2337,10 @@
if (pred_filter_search) {
INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
- if (xd->up_available)
- af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
- if (xd->left_available)
- lf = xd->mi[-1]->mbmi.interp_filter;
+ if (xd->up_available) af = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
+ if (xd->left_available) lf = xd->mi[-1]->mbmi.interp_filter;
- if ((this_mode != NEWMV) || (af == lf))
- best_filter = af;
+ if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
}
if (is_comp_pred) {
@@ -2508,26 +2363,24 @@
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
- joint_motion_search(cpi, x, bsize, frame_mv,
- mi_row, mi_col, single_newmv, &rate_mv);
+ joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
+ single_newmv, &rate_mv);
} else {
- rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+ rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
&x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
- &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
- x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
+ &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
+ x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
}
*rate2 += rate_mv;
} else {
int_mv tmp_mv;
- single_motion_search(cpi, x, bsize, mi_row, mi_col,
- &tmp_mv, &rate_mv);
- if (tmp_mv.as_int == INVALID_MV)
- return INT64_MAX;
+ single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
+ if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
- frame_mv[refs[0]].as_int =
- xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
+ tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
// Estimate the rate implications of a new mv but discount this
@@ -2545,11 +2398,9 @@
for (i = 0; i < is_comp_pred + 1; ++i) {
cur_mv[i] = frame_mv[refs[i]];
// Clip "next_nearest" so that it does not extend too far out of the image
- if (this_mode != NEWMV)
- clamp_mv2(&cur_mv[i].as_mv, xd);
+ if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
- if (mv_check_bounds(x, &cur_mv[i].as_mv))
- return INT64_MAX;
+ if (mv_check_bounds(x, &cur_mv[i].as_mv)) return INT64_MAX;
mbmi->mv[i].as_int = cur_mv[i].as_int;
}
@@ -2570,12 +2421,11 @@
//
// Under some circumstances we discount the cost of new mv mode to encourage
// initiation of a motion field.
- if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
- mode_mv, refs[0])) {
- *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode,
- mbmi_ext->mode_context[refs[0]]),
- cost_mv_ref(cpi, NEARESTMV,
- mbmi_ext->mode_context[refs[0]]));
+ if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
+ refs[0])) {
+ *rate2 +=
+ VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
+ cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
} else {
*rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
}
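
A sketch of the discount path above: when discount_newmv_test() holds, NEWMV is charged min(cost(NEWMV), cost(NEARESTMV)) instead of its full mode cost, so a promising new vector is not penalized out of starting a motion field. The costs below are made-up numbers, not output of cost_mv_ref().

/* Sketch of the NEWMV mode-cost discount with hypothetical costs. */
#include <stdio.h>

static int imin(int a, int b) { return a < b ? a : b; }

int main(void) {
  const int cost_newmv = 900;     /* hypothetical rate cost */
  const int cost_nearestmv = 300; /* hypothetical rate cost */
  const int discount = 1;         /* outcome of discount_newmv_test() */
  int rate2 = 0;
  rate2 += discount ? imin(cost_newmv, cost_nearestmv) : cost_newmv;
  printf("charged mode cost = %d\n", rate2); /* 300: charged as NEARESTMV */
  return 0;
}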
@@ -2587,13 +2437,11 @@
pred_exists = 0;
// Are all MVs integer pel for Y and UV
intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv);
- if (is_comp_pred)
- intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
+ if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv);
// Search for best switchable filter by checking the variance of
// pred error irrespective of whether the filter will be used
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
if (cm->interp_filter != BILINEAR) {
if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
@@ -2618,8 +2466,7 @@
filter_cache[i] = rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
*mask_filter = VPXMAX(*mask_filter, rd);
} else {
int rate_sum = 0;
@@ -2631,8 +2478,7 @@
continue;
}
- if ((cm->interp_filter == SWITCHABLE &&
- (!i || best_needs_copy)) ||
+ if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
(cm->interp_filter != SWITCHABLE &&
(cm->interp_filter == mbmi->interp_filter ||
(i == 0 && intpel_mv)))) {
@@ -2644,15 +2490,14 @@
}
}
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
- &tmp_skip_sb, &tmp_skip_sse);
+ model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
+ &tmp_skip_sse);
rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
filter_cache[i] = rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
*mask_filter = VPXMAX(*mask_filter, rd);
if (i == 0 && intpel_mv) {
@@ -2692,8 +2537,8 @@
}
}
// Set the appropriate filter
- mbmi->interp_filter = cm->interp_filter != SWITCHABLE ?
- cm->interp_filter : best_filter;
+ mbmi->interp_filter =
+ cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
if (pred_exists) {
@@ -2712,15 +2557,14 @@
// switchable list (ex. bilinear) is indicated at the frame level, or
// skip condition holds.
vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
- model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
- &skip_txfm_sb, &skip_sse_sb);
+ model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
+ &skip_sse_sb);
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
memcpy(bsse, x->bsse, sizeof(bsse));
}
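
Every rd comparison in this file goes through the RDCOST() Lagrangian: rate is weighted by rdmult (lambda, derived from Q) and added to shifted distortion. The macro body below matches the usual vp9/vp10-era definition, but treat it as an assumption; the tree's rd.h is authoritative.

/* Minimal model of the RDCOST() cost function used above (assumed body). */
#include <stdio.h>
#include <stdint.h>

#define RDCOST(RM, DM, R, D) \
  (((128 + ((int64_t)(R)) * (RM)) >> 8) + ((int64_t)(D) << (DM)))

int main(void) {
  const int rdmult = 70; /* lambda, set from Q inside the encoder */
  const int rddiv = 0;   /* distortion scaling shift */
  const int rate = 1000;
  const int64_t dist = 5000;
  /* Lower rd wins: a mode trades rate against distortion through lambda. */
  printf("rd = %lld\n", (long long)RDCOST(rdmult, rddiv, rate, dist));
  return 0;
}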
- if (!is_comp_pred)
- single_filter[this_mode][refs[0]] = mbmi->interp_filter;
+ if (!is_comp_pred) single_filter[this_mode][refs[0]] = mbmi->interp_filter;
if (cpi->sf.adaptive_mode_search)
if (is_comp_pred)
@@ -2737,8 +2581,7 @@
}
}
- if (cm->interp_filter == SWITCHABLE)
- *rate2 += rs;
+ if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
memcpy(x->bsse, bsse, sizeof(bsse));
@@ -2750,8 +2593,8 @@
// Y cost and distortion
vp10_subtract_plane(x, bsize, 0);
- super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse,
- bsize, ref_best_rd);
+ super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
+ ref_best_rd);
if (*rate_y == INT_MAX) {
*rate2 = INT_MAX;
@@ -2788,16 +2631,15 @@
*distortion = skip_sse_sb;
}
- if (!is_comp_pred)
- single_skippable[this_mode][refs[0]] = *skippable;
+ if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
restore_dst_buf(xd, orig_dst, orig_dst_stride);
return 0; // The rate-distortion cost will be re-calculated by caller.
}
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
- RD_COST *rd_cost, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
+void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblockd_plane *const pd = xd->plane;
@@ -2810,9 +2652,8 @@
xd->mi[0]->mbmi.ref_frame[1] = NONE;
if (bsize >= BLOCK_8X8) {
- if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
- &dist_y, &y_skip, bsize,
- best_rd) >= best_rd) {
+ if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
+ &y_skip, bsize, best_rd) >= best_rd) {
rd_cost->rate = INT_MAX;
return;
}
@@ -2824,20 +2665,18 @@
return;
}
}
- max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize,
- pd[1].subsampling_x,
- pd[1].subsampling_y);
- rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly,
- &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize),
- max_uv_tx_size);
+ max_uv_tx_size = get_uv_tx_size_impl(
+ xd->mi[0]->mbmi.tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
+ &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
if (y_skip && uv_skip) {
rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
rd_cost->dist = dist_y + dist_uv;
} else {
- rd_cost->rate = rate_y + rate_uv +
- vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+ rd_cost->rate =
+ rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
rd_cost->dist = dist_y + dist_uv;
}
@@ -2851,10 +2690,8 @@
#define LOW_VAR_THRESH 16
#define VLOW_ADJ_MAX 25
#define VHIGH_ADJ_MAX 8
-static void rd_variance_adjustment(VP10_COMP *cpi,
- MACROBLOCK *x,
- BLOCK_SIZE bsize,
- int64_t *this_rd,
+static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
+ BLOCK_SIZE bsize, int64_t *this_rd,
MV_REFERENCE_FRAME ref_frame,
unsigned int source_variance) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2863,30 +2700,29 @@
int64_t var_error = 0;
int64_t var_factor = 0;
- if (*this_rd == INT64_MAX)
- return;
+ if (*this_rd == INT64_MAX) return;
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- recon_variance =
- vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd);
+ recon_variance = vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
+ bsize, xd->bd);
} else {
recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
}
#else
recon_variance =
- vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+ vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
#endif // CONFIG_VPX_HIGHBITDEPTH
if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
absvar_diff = (source_variance > recon_variance)
- ? (source_variance - recon_variance)
- : (recon_variance - source_variance);
+ ? (source_variance - recon_variance)
+ : (recon_variance - source_variance);
var_error = ((int64_t)200 * source_variance * recon_variance) /
- (((int64_t)source_variance * source_variance) +
- ((int64_t)recon_variance * recon_variance));
+ (((int64_t)source_variance * source_variance) +
+ ((int64_t)recon_variance * recon_variance));
var_error = 100 - var_error;
}
@@ -2896,8 +2732,8 @@
if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
(source_variance > recon_variance)) {
var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
- // A second possible case of interest is where the source variance
- // is very low and we wish to discourage false texture or motion trails.
+ // A second possible case of interest is where the source variance
+ // is very low and we wish to discourage false texture or motion trails.
} else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
(recon_variance > source_variance)) {
var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
@@ -2905,12 +2741,11 @@
*this_rd += (*this_rd * var_factor) / 100;
}
-
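
The adjustment above is pure integer arithmetic and worth tracing once. With source variance 100 against reconstructed variance 25 (the intra "lost texture" case), var_error = 100 - 200*100*25 / (100^2 + 25^2) = 53, the factor is capped by VLOW_ADJ_MAX at 25, and rd is inflated by 25%. A self-contained trace of exactly those steps:

/* Worked example of rd_variance_adjustment()'s intra penalty path. */
#include <stdio.h>
#include <stdint.h>

static int64_t imin64(int64_t a, int64_t b) { return a < b ? a : b; }

int main(void) {
  const int64_t source_variance = 100, recon_variance = 25;
  const int64_t vlow_adj_max = 25; /* VLOW_ADJ_MAX */
  int64_t this_rd = 100000;

  const int64_t absvar_diff = source_variance - recon_variance; /* 75 */
  const int64_t var_error =
      100 - (200 * source_variance * recon_variance) /
                (source_variance * source_variance +
                 recon_variance * recon_variance); /* 100 - 47 = 53 */
  const int64_t var_factor =
      imin64(absvar_diff, imin64(vlow_adj_max, var_error)); /* 25 */
  this_rd += (this_rd * var_factor) / 100; /* 25% penalty -> 125000 */
  printf("adjusted rd = %lld\n", (long long)this_rd);
  return 0;
}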
// Do we have an internal image edge (e.g. formatting bars).
int vp10_internal_image_edge(VP10_COMP *cpi) {
return (cpi->oxcf.pass == 2) &&
- ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
- (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
+ ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
+ (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
}
// Checks to see if a super block is on a horizontal image edge.
@@ -2970,16 +2805,13 @@
// Checks to see if a super block is at the edge of the active image.
// In most cases this is the "real" edge unless there are formatting
// bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi,
- int mi_row, int mi_col) {
+int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
}
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- int mi_row, int mi_col,
+void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, int mi_row, int mi_col,
RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far) {
@@ -3034,20 +2866,16 @@
vp10_zero(best_mbmode);
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
best_filter_rd[i] = INT64_MAX;
- for (i = 0; i < TX_SIZES; i++)
- rate_uv_intra[i] = INT_MAX;
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- x->pred_sse[i] = INT_MAX;
+ for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
for (i = 0; i < MB_MODE_COUNT; ++i) {
for (k = 0; k < MAX_REF_FRAMES; ++k) {
single_inter_filter[i][k] = SWITCHABLE;
@@ -3140,12 +2968,11 @@
mode_skip_mask[INTRA_FRAME] |=
~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
- for (i = 0; i <= LAST_NEW_MV_INDEX; ++i)
- mode_threshold[i] = 0;
+ for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
- midx = sf->schedule_mode_search ? mode_skip_start : 0;
+ midx = sf->schedule_mode_search ? mode_skip_start : 0;
while (midx > 4) {
uint8_t end_pos = 0;
for (i = 5; i < midx; ++i) {
@@ -3180,8 +3007,7 @@
// skip mask to look at a subset of the remaining modes.
if (midx == mode_skip_start && best_mode_index >= 0) {
switch (best_mbmode.ref_frame[0]) {
- case INTRA_FRAME:
- break;
+ case INTRA_FRAME: break;
case LAST_FRAME:
ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3190,13 +3016,9 @@
ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
break;
- case ALTREF_FRAME:
- ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK;
- break;
+ case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
case NONE:
- case MAX_REF_FRAMES:
- assert(0 && "Invalid Reference frame");
- break;
+ case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
@@ -3204,29 +3026,24 @@
(ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
continue;
- if (mode_skip_mask[ref_frame] & (1 << this_mode))
- continue;
+ if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
// Test best rd so far against threshold for trying this mode.
if (best_mode_skippable && sf->schedule_mode_search)
mode_threshold[mode_index] <<= 1;
- if (best_rd < mode_threshold[mode_index])
- continue;
+ if (best_rd < mode_threshold[mode_index]) continue;
comp_pred = second_ref_frame > INTRA_FRAME;
if (comp_pred) {
- if (!cpi->allow_comp_inter_inter)
- continue;
+ if (!cpi->allow_comp_inter_inter) continue;
// Skip compound inter modes if ARF is not available.
- if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
- continue;
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
// Do not allow compound prediction if the segment level reference frame
// feature is in use, as in this case there can only be one reference.
- if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- continue;
+ if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -3255,19 +3072,17 @@
// one of the neighboring directional modes
if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
(this_mode >= D45_PRED && this_mode <= TM_PRED)) {
- if (best_mode_index >= 0 &&
- best_mbmode.ref_frame[0] > INTRA_FRAME)
+ if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
continue;
}
if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
- if (conditional_skipintra(this_mode, best_intra_mode))
- continue;
+ if (conditional_skipintra(this_mode, best_intra_mode)) continue;
}
}
} else {
- const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
- if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
- this_mode, ref_frames))
+ const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
+ if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
+ ref_frames))
continue;
}
@@ -3277,8 +3092,8 @@
mbmi->ref_frame[1] = second_ref_frame;
// Evaluate all sub-pel filters irrespective of whether we can use
// them for this frame.
- mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
- : cm->interp_filter;
+ mbmi->interp_filter =
+ cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0;
x->skip = 0;
@@ -3287,25 +3102,23 @@
// Select prediction reference frames.
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
- if (comp_pred)
- xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+ if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
if (ref_frame == INTRA_FRAME) {
TX_SIZE uv_tx;
struct macroblockd_plane *const pd = &xd->plane[1];
memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
- super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable,
- NULL, bsize, best_rd);
- if (rate_y == INT_MAX)
- continue;
+ super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
+ best_rd);
+ if (rate_y == INT_MAX) continue;
uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x,
pd->subsampling_y);
if (rate_uv_intra[uv_tx] == INT_MAX) {
- choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx,
- &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx],
- &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]);
+ choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
+ &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
+ &skip_uv[uv_tx], &mode_uv[uv_tx]);
}
rate_uv = rate_uv_tokenonly[uv_tx];
@@ -3318,21 +3131,16 @@
rate2 += intra_cost_penalty;
distortion2 = distortion_y + distortion_uv;
} else {
- this_rd = handle_inter_mode(cpi, x, bsize,
- &rate2, &distortion2, &skippable,
- &rate_y, &rate_uv,
- &disable_skip, frame_mv,
- mi_row, mi_col,
- single_newmv, single_inter_filter,
- single_skippable, &total_sse, best_rd,
- &mask_filter, filter_cache);
- if (this_rd == INT64_MAX)
- continue;
+ this_rd = handle_inter_mode(
+ cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
+ &disable_skip, frame_mv, mi_row, mi_col, single_newmv,
+ single_inter_filter, single_skippable, &total_sse, best_rd,
+ &mask_filter, filter_cache);
+ if (this_rd == INT64_MAX) continue;
compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
- if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += compmode_cost;
+ if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
}
// Estimate the reference frame signaling cost and add it
@@ -3374,11 +3182,11 @@
// Apply an adjustment to the rd value based on the similarity of the
// source variance and reconstructed variance.
- rd_variance_adjustment(cpi, x, bsize, &this_rd,
- ref_frame, x->source_variance);
+ rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
+ x->source_variance);
if (ref_frame == INTRA_FRAME) {
- // Keep record of best intra rd
+ // Keep record of best intra rd
if (this_rd < best_intra_rd) {
best_intra_rd = this_rd;
best_intra_mode = mbmi->mode;
@@ -3415,8 +3223,7 @@
best_skip2 = this_skip2;
best_mode_skippable = skippable;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
@@ -3436,8 +3243,7 @@
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
}
- if (ref_frame > INTRA_FRAME &&
- distortion2 * scale < qstep * qstep) {
+ if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
early_term = 1;
}
}
@@ -3471,8 +3277,9 @@
/* keep record of best filter type */
if (!mode_excluded && cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
+ int64_t ref =
+ filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+ : cm->interp_filter];
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
int64_t adj_rd;
@@ -3493,11 +3300,9 @@
}
}
- if (early_term)
- break;
+ if (early_term) break;
- if (x->skip && !comp_pred)
- break;
+ if (x->skip && !comp_pred) break;
}
// The inter modes' rate costs are not calculated precisely in some cases.
@@ -3505,20 +3310,23 @@
// ZEROMV. Here, checks are added for those cases, and the mode decisions
// are corrected.
if (best_mbmode.mode == NEWMV) {
- const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0],
- best_mbmode.ref_frame[1]};
+ const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
+ best_mbmode.ref_frame[1] };
int comp_pred_mode = refs[1] > INTRA_FRAME;
if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
- ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int ==
- best_mbmode.mv[1].as_int) || !comp_pred_mode))
+ ((comp_pred_mode &&
+ frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+ !comp_pred_mode))
best_mbmode.mode = NEARESTMV;
else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
- ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int ==
- best_mbmode.mv[1].as_int) || !comp_pred_mode))
+ ((comp_pred_mode &&
+ frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
+ !comp_pred_mode))
best_mbmode.mode = NEARMV;
else if (best_mbmode.mv[0].as_int == 0 &&
- ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode))
+ ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
+ !comp_pred_mode))
best_mbmode.mode = ZEROMV;
}
@@ -3537,8 +3345,7 @@
uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]);
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
&rate_uv_tokenonly[uv_tx_size],
- &dist_uv[uv_tx_size],
- &skip_uv[uv_tx_size],
+ &dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
uv_tx_size);
}
@@ -3550,7 +3357,7 @@
if (!cpi->rc.is_src_frame_alt_ref)
vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- sf->adaptive_rd_thresh, bsize, best_mode_index);
+ sf->adaptive_rd_thresh, bsize, best_mode_index);
// macroblock modes
*mbmi = best_mbmode;
@@ -3583,8 +3390,7 @@
if (!x->skip && !x->select_tx_size) {
int has_high_freq_coeff = 0;
int plane;
- int max_plane = is_inter_block(&xd->mi[0]->mbmi)
- ? MAX_MB_PLANE : 1;
+ int max_plane = is_inter_block(&xd->mi[0]->mbmi) ? MAX_MB_PLANE : 1;
for (plane = 0; plane < max_plane; ++plane) {
x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
@@ -3604,13 +3410,11 @@
best_filter_diff, best_mode_skippable);
}
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
+void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, RD_COST *rd_cost,
+ BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -3629,10 +3433,8 @@
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < MAX_REF_FRAMES; ++i)
- x->pred_sse[i] = INT_MAX;
- for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i)
- x->pred_mv_sad[i] = INT_MAX;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
+ for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
rd_cost->rate = INT_MAX;
@@ -3691,25 +3493,20 @@
(cm->interp_filter == mbmi->interp_filter));
vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
+ cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
vp10_zero(best_pred_diff);
vp10_zero(best_filter_diff);
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
- store_coding_context(x, ctx, THR_ZEROMV,
- best_pred_diff, best_filter_diff, 0);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
+ store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
}
-void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
- TileDataEnc *tile_data,
- MACROBLOCK *x,
- int mi_row, int mi_col,
- RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far) {
+void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
+ MACROBLOCK *x, int mi_row, int mi_col,
+ RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far) {
VP10_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
SPEED_FEATURES *const sf = &cpi->sf;
@@ -3739,7 +3536,7 @@
int skip_uv;
PREDICTION_MODE mode_uv = DC_PRED;
const int intra_cost_penalty = vp10_get_intra_cost_penalty(
- cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
int_mv seg_mvs[4][MAX_REF_FRAMES];
b_mode_info best_bmodes[4];
int best_skip2 = 0;
@@ -3747,25 +3544,22 @@
int64_t mask_filter = 0;
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
int internal_active_edge =
- vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+ vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
memset(x->zcoeff_blk[TX_4X4], 0, 4);
vp10_zero(best_mbmode);
- for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
- filter_cache[i] = INT64_MAX;
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
for (i = 0; i < 4; i++) {
int j;
- for (j = 0; j < MAX_REF_FRAMES; j++)
- seg_mvs[i][j].as_int = INVALID_MV;
+ for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
}
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
&comp_mode_p);
- for (i = 0; i < REFERENCE_MODES; ++i)
- best_pred_rd[i] = INT64_MAX;
+ for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
best_filter_rd[i] = INT64_MAX;
rate_uv_intra = INT_MAX;
@@ -3775,8 +3569,7 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
- frame_mv[NEARESTMV], frame_mv[NEARMV],
- yv12_mb);
+ frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
} else {
ref_frame_skip_mask[0] |= (1 << ref_frame);
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3806,8 +3599,7 @@
if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
if (ref_index == 3) {
switch (best_mbmode.ref_frame[0]) {
- case INTRA_FRAME:
- break;
+ case INTRA_FRAME: break;
case LAST_FRAME:
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
@@ -3820,9 +3612,7 @@
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
break;
case NONE:
- case MAX_REF_FRAMES:
- assert(0 && "Invalid Reference frame");
- break;
+ case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
}
}
}
@@ -3840,14 +3630,11 @@
comp_pred = second_ref_frame > INTRA_FRAME;
if (comp_pred) {
- if (!cpi->allow_comp_inter_inter)
- continue;
- if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
- continue;
+ if (!cpi->allow_comp_inter_inter) continue;
+ if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
// Do not allow compound prediction if the segment level reference frame
// feature is in use, as in this case there can only be one reference.
- if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- continue;
+ if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
best_mbmode.ref_frame[0] == INTRA_FRAME)
@@ -3874,9 +3661,9 @@
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
continue;
- // Disable this drop out case if the ref frame
- // segment level feature is enabled for this segment. This is to
- // prevent the possibility that we end up unable to pick any mode.
+ // Disable this drop out case if the ref frame
+ // segment level feature is enabled for this segment. This is to
+ // prevent the possibility that we end up unable to pick any mode.
} else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
// unless ARNR filtering is enabled in which case we want
@@ -3892,33 +3679,29 @@
mbmi->ref_frame[1] = second_ref_frame;
// Evaluate all sub-pel filters irrespective of whether we can use
// them for this frame.
- mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
- : cm->interp_filter;
+ mbmi->interp_filter =
+ cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
x->skip = 0;
set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
// Select prediction reference frames.
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
- if (comp_pred)
- xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
+ if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
}
if (ref_frame == INTRA_FRAME) {
int rate;
- if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y,
- &distortion_y, best_rd) >= best_rd)
+ if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
+ best_rd) >= best_rd)
continue;
rate2 += rate;
rate2 += intra_cost_penalty;
distortion2 += distortion_y;
if (rate_uv_intra == INT_MAX) {
- choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4,
- &rate_uv_intra,
- &rate_uv_tokenonly,
- &dist_uv, &skip_uv,
- &mode_uv);
+ choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
+ &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
}
rate2 += rate_uv_intra;
rate_uv = rate_uv_tokenonly;
@@ -3934,19 +3717,20 @@
int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
int tmp_best_skippable = 0;
int switchable_filter_index;
- int_mv *second_ref = comp_pred ?
- &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
+ int_mv *second_ref =
+ comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
b_mode_info tmp_best_bmodes[16];
MB_MODE_INFO tmp_best_mbmode;
BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
int pred_exists = 0;
int uv_skippable;
- this_rd_thresh = (ref_frame == LAST_FRAME) ?
- rd_opt->threshes[segment_id][bsize][THR_LAST] :
- rd_opt->threshes[segment_id][bsize][THR_ALTR];
- this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
- rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh;
+ this_rd_thresh = (ref_frame == LAST_FRAME)
+ ? rd_opt->threshes[segment_id][bsize][THR_LAST]
+ : rd_opt->threshes[segment_id][bsize][THR_ALTR];
+ this_rd_thresh = (ref_frame == GOLDEN_FRAME)
+ ? rd_opt->threshes[segment_id][bsize][THR_GOLD]
+ : this_rd_thresh;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
filter_cache[i] = INT64_MAX;
@@ -3958,8 +3742,9 @@
ctx->pred_interp_filter < SWITCHABLE) {
tmp_best_filter = ctx->pred_interp_filter;
} else if (sf->adaptive_pred_interp_filter == 2) {
- tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ?
- ctx->pred_interp_filter : 0;
+ tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
+ ? ctx->pred_interp_filter
+ : 0;
} else {
for (switchable_filter_index = 0;
switchable_filter_index < SWITCHABLE_FILTERS;
@@ -3968,24 +3753,19 @@
int64_t rs_rd;
MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
mbmi->interp_filter = switchable_filter_index;
- tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
- &mbmi_ext->ref_mvs[ref_frame][0],
- second_ref, best_yrd, &rate,
- &rate_y, &distortion,
- &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs,
- bsi, switchable_filter_index,
- mi_row, mi_col);
+ tmp_rd = rd_pick_best_sub8x8_mode(
+ cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+ &rate, &rate_y, &distortion, &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
+ mi_row, mi_col);
- if (tmp_rd == INT64_MAX)
- continue;
+ if (tmp_rd == INT64_MAX) continue;
rs = vp10_get_switchable_rate(cpi, xd);
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
filter_cache[switchable_filter_index] = tmp_rd;
filter_cache[SWITCHABLE_FILTERS] =
VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
- if (cm->interp_filter == SWITCHABLE)
- tmp_rd += rs_rd;
+ if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
mask_filter = VPXMAX(mask_filter, tmp_rd);
@@ -4009,8 +3789,7 @@
x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
}
pred_exists = 1;
- if (switchable_filter_index == 0 &&
- sf->use_rd_breakout &&
+ if (switchable_filter_index == 0 && sf->use_rd_breakout &&
best_rd < INT64_MAX) {
if (tmp_best_rdu / 2 > best_rd) {
// skip searching the other filters if the first is
@@ -4025,22 +3804,19 @@
}
}
- if (tmp_best_rdu == INT64_MAX && pred_exists)
- continue;
+ if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
- mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ?
- tmp_best_filter : cm->interp_filter);
+ mbmi->interp_filter =
+ (cm->interp_filter == SWITCHABLE ? tmp_best_filter
+ : cm->interp_filter);
if (!pred_exists) {
// Handles the special case when a filter that is not in the
// switchable list (bilinear, 6-tap) is indicated at the frame level
- tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
- &x->mbmi_ext->ref_mvs[ref_frame][0],
- second_ref, best_yrd, &rate, &rate_y,
- &distortion, &skippable, &total_sse,
- (int) this_rd_thresh, seg_mvs, bsi, 0,
- mi_row, mi_col);
- if (tmp_rd == INT64_MAX)
- continue;
+ tmp_rd = rd_pick_best_sub8x8_mode(
+ cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
+ &rate, &rate_y, &distortion, &skippable, &total_sse,
+ (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
+ if (tmp_rd == INT64_MAX) continue;
} else {
total_sse = tmp_best_sse;
rate = tmp_best_rate;
@@ -4048,8 +3824,7 @@
distortion = tmp_best_distortion;
skippable = tmp_best_skippable;
*mbmi = tmp_best_mbmode;
- for (i = 0; i < 4; i++)
- xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
+ for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
}
rate2 += rate;
@@ -4064,15 +3839,14 @@
compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
- tmp_best_rdu = best_rd -
- VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
- RDCOST(x->rdmult, x->rddiv, 0, total_sse));
+ tmp_best_rdu =
+ best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
+ RDCOST(x->rdmult, x->rddiv, 0, total_sse));
if (tmp_best_rdu > 0) {
// If even the 'Y' rd value of split is higher than best so far
// then don't bother looking at UV
- vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
- BLOCK_8X8);
+ vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
&uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -4085,8 +3859,7 @@
}
}
- if (cm->reference_mode == REFERENCE_MODE_SELECT)
- rate2 += compmode_cost;
+ if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
// Estimate the reference frame signaling cost and add it
// to the rolling cost variable.
@@ -4148,17 +3921,15 @@
rd_cost->dist = distortion2;
rd_cost->rdcost = this_rd;
best_rd = this_rd;
- best_yrd = best_rd -
- RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
+ best_yrd =
+ best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
best_mbmode = *mbmi;
best_skip2 = this_skip2;
- if (!x->select_tx_size)
- swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
+ if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
- for (i = 0; i < 4; i++)
- best_bmodes[i] = xd->mi[0]->bmi[i];
+ for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
// TODO(debargha): enhance this test with a better distortion prediction
// based on qp, activity mask and history
@@ -4176,8 +3947,7 @@
const int var_adjust = (x->source_variance < 16);
scale -= var_adjust;
}
- if (ref_frame > INTRA_FRAME &&
- distortion2 * scale < qstep * qstep) {
+ if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
early_term = 1;
}
}
@@ -4211,8 +3981,9 @@
/* keep record of best filter type */
if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
cm->interp_filter != BILINEAR) {
- int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ?
- SWITCHABLE_FILTERS : cm->interp_filter];
+ int64_t ref =
+ filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
+ : cm->interp_filter];
int64_t adj_rd;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
if (ref == INT64_MAX)
@@ -4231,11 +4002,9 @@
}
}
- if (early_term)
- break;
+ if (early_term) break;
- if (x->skip && !comp_pred)
- break;
+ if (x->skip && !comp_pred) break;
}
if (best_rd >= best_rd_so_far) {
@@ -4249,11 +4018,8 @@
// Do Intra UV best rd mode selection if best mode choice above was intra.
if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
*mbmi = best_mbmode;
- rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra,
- &rate_uv_tokenonly,
- &dist_uv,
- &skip_uv,
- BLOCK_8X8, TX_4X4);
+ rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
+ &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
}
}
@@ -4269,14 +4035,13 @@
!is_inter_block(&best_mbmode));
vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- sf->adaptive_rd_thresh, bsize, best_ref_index);
+ sf->adaptive_rd_thresh, bsize, best_ref_index);
// macroblock modes
*mbmi = best_mbmode;
x->skip |= best_skip2;
if (!is_inter_block(&best_mbmode)) {
- for (i = 0; i < 4; i++)
- xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
+ for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
} else {
for (i = 0; i < 4; ++i)
memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
@@ -4305,6 +4070,6 @@
vp10_zero(best_filter_diff);
}
- store_coding_context(x, ctx, best_ref_index,
- best_pred_diff, best_filter_diff, 0);
+ store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
+ 0);
}
diff --git a/vp10/encoder/rdopt.h b/vp10/encoder/rdopt.h
index 6526d2b..b887b8a 100644
--- a/vp10/encoder/rdopt.h
+++ b/vp10/encoder/rdopt.h
@@ -26,33 +26,28 @@
struct RD_COST;
void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
- struct RD_COST *rd_cost, BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx, int64_t best_rd);
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd);
unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs);
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs);
#if CONFIG_VPX_HIGHBITDEPTH
unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
- const struct buf_2d *ref,
- BLOCK_SIZE bs, int bd);
+ const struct buf_2d *ref,
+ BLOCK_SIZE bs, int bd);
#endif
void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x,
- int mi_row, int mi_col,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ struct TileDataEnc *tile_data,
+ struct macroblock *x, int mi_row, int mi_col,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
-void vp10_rd_pick_inter_mode_sb_seg_skip(struct VP10_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize,
- PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+void vp10_rd_pick_inter_mode_sb_seg_skip(
+ struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+ struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far);
int vp10_internal_image_edge(struct VP10_COMP *cpi);
int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
@@ -60,12 +55,11 @@
int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
- struct TileDataEnc *tile_data,
- struct macroblock *x,
- int mi_row, int mi_col,
- struct RD_COST *rd_cost,
- BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
- int64_t best_rd_so_far);
+ struct TileDataEnc *tile_data,
+ struct macroblock *x, int mi_row,
+ int mi_col, struct RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
+ int64_t best_rd_so_far);
#ifdef __cplusplus
} // extern "C"
diff --git a/vp10/encoder/resize.c b/vp10/encoder/resize.c
index 862ab02..b75ceb4 100644
--- a/vp10/encoder/resize.c
+++ b/vp10/encoder/resize.c
@@ -22,198 +22,118 @@
#include "vp10/common/common.h"
#include "vp10/encoder/resize.h"
-#define FILTER_BITS 7
+#define FILTER_BITS 7
-#define INTERP_TAPS 8
-#define SUBPEL_BITS 5
-#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
-#define INTERP_PRECISION_BITS 32
+#define INTERP_TAPS 8
+#define SUBPEL_BITS 5
+#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
+#define INTERP_PRECISION_BITS 32
typedef int16_t interp_kernel[INTERP_TAPS];
// Filters for interpolation (0.5-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = {
- {-3, 0, 35, 64, 35, 0, -3, 0},
- {-3, -1, 34, 64, 36, 1, -3, 0},
- {-3, -1, 32, 64, 38, 1, -3, 0},
- {-2, -2, 31, 63, 39, 2, -3, 0},
- {-2, -2, 29, 63, 41, 2, -3, 0},
- {-2, -2, 28, 63, 42, 3, -4, 0},
- {-2, -3, 27, 63, 43, 4, -4, 0},
- {-2, -3, 25, 62, 45, 5, -4, 0},
- {-2, -3, 24, 62, 46, 5, -4, 0},
- {-2, -3, 23, 61, 47, 6, -4, 0},
- {-2, -3, 21, 60, 49, 7, -4, 0},
- {-1, -4, 20, 60, 50, 8, -4, -1},
- {-1, -4, 19, 59, 51, 9, -4, -1},
- {-1, -4, 17, 58, 52, 10, -4, 0},
- {-1, -4, 16, 57, 53, 12, -4, -1},
- {-1, -4, 15, 56, 54, 13, -4, -1},
- {-1, -4, 14, 55, 55, 14, -4, -1},
- {-1, -4, 13, 54, 56, 15, -4, -1},
- {-1, -4, 12, 53, 57, 16, -4, -1},
- {0, -4, 10, 52, 58, 17, -4, -1},
- {-1, -4, 9, 51, 59, 19, -4, -1},
- {-1, -4, 8, 50, 60, 20, -4, -1},
- {0, -4, 7, 49, 60, 21, -3, -2},
- {0, -4, 6, 47, 61, 23, -3, -2},
- {0, -4, 5, 46, 62, 24, -3, -2},
- {0, -4, 5, 45, 62, 25, -3, -2},
- {0, -4, 4, 43, 63, 27, -3, -2},
- {0, -4, 3, 42, 63, 28, -2, -2},
- {0, -3, 2, 41, 63, 29, -2, -2},
- {0, -3, 2, 39, 63, 31, -2, -2},
- {0, -3, 1, 38, 64, 32, -1, -3},
- {0, -3, 1, 36, 64, 34, -1, -3}
+ { -3, 0, 35, 64, 35, 0, -3, 0 }, { -3, -1, 34, 64, 36, 1, -3, 0 },
+ { -3, -1, 32, 64, 38, 1, -3, 0 }, { -2, -2, 31, 63, 39, 2, -3, 0 },
+ { -2, -2, 29, 63, 41, 2, -3, 0 }, { -2, -2, 28, 63, 42, 3, -4, 0 },
+ { -2, -3, 27, 63, 43, 4, -4, 0 }, { -2, -3, 25, 62, 45, 5, -4, 0 },
+ { -2, -3, 24, 62, 46, 5, -4, 0 }, { -2, -3, 23, 61, 47, 6, -4, 0 },
+ { -2, -3, 21, 60, 49, 7, -4, 0 }, { -1, -4, 20, 60, 50, 8, -4, -1 },
+ { -1, -4, 19, 59, 51, 9, -4, -1 }, { -1, -4, 17, 58, 52, 10, -4, 0 },
+ { -1, -4, 16, 57, 53, 12, -4, -1 }, { -1, -4, 15, 56, 54, 13, -4, -1 },
+ { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 13, 54, 56, 15, -4, -1 },
+ { -1, -4, 12, 53, 57, 16, -4, -1 }, { 0, -4, 10, 52, 58, 17, -4, -1 },
+ { -1, -4, 9, 51, 59, 19, -4, -1 }, { -1, -4, 8, 50, 60, 20, -4, -1 },
+ { 0, -4, 7, 49, 60, 21, -3, -2 }, { 0, -4, 6, 47, 61, 23, -3, -2 },
+ { 0, -4, 5, 46, 62, 24, -3, -2 }, { 0, -4, 5, 45, 62, 25, -3, -2 },
+ { 0, -4, 4, 43, 63, 27, -3, -2 }, { 0, -4, 3, 42, 63, 28, -2, -2 },
+ { 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 2, 39, 63, 31, -2, -2 },
+ { 0, -3, 1, 38, 64, 32, -1, -3 }, { 0, -3, 1, 36, 64, 34, -1, -3 }
};
// Filters for interpolation (0.625-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = {
- {-1, -8, 33, 80, 33, -8, -1, 0},
- {-1, -8, 30, 80, 35, -8, -1, 1},
- {-1, -8, 28, 80, 37, -7, -2, 1},
- {0, -8, 26, 79, 39, -7, -2, 1},
- {0, -8, 24, 79, 41, -7, -2, 1},
- {0, -8, 22, 78, 43, -6, -2, 1},
- {0, -8, 20, 78, 45, -5, -3, 1},
- {0, -8, 18, 77, 48, -5, -3, 1},
- {0, -8, 16, 76, 50, -4, -3, 1},
- {0, -8, 15, 75, 52, -3, -4, 1},
- {0, -7, 13, 74, 54, -3, -4, 1},
- {0, -7, 11, 73, 56, -2, -4, 1},
- {0, -7, 10, 71, 58, -1, -4, 1},
- {1, -7, 8, 70, 60, 0, -5, 1},
- {1, -6, 6, 68, 62, 1, -5, 1},
- {1, -6, 5, 67, 63, 2, -5, 1},
- {1, -6, 4, 65, 65, 4, -6, 1},
- {1, -5, 2, 63, 67, 5, -6, 1},
- {1, -5, 1, 62, 68, 6, -6, 1},
- {1, -5, 0, 60, 70, 8, -7, 1},
- {1, -4, -1, 58, 71, 10, -7, 0},
- {1, -4, -2, 56, 73, 11, -7, 0},
- {1, -4, -3, 54, 74, 13, -7, 0},
- {1, -4, -3, 52, 75, 15, -8, 0},
- {1, -3, -4, 50, 76, 16, -8, 0},
- {1, -3, -5, 48, 77, 18, -8, 0},
- {1, -3, -5, 45, 78, 20, -8, 0},
- {1, -2, -6, 43, 78, 22, -8, 0},
- {1, -2, -7, 41, 79, 24, -8, 0},
- {1, -2, -7, 39, 79, 26, -8, 0},
- {1, -2, -7, 37, 80, 28, -8, -1},
- {1, -1, -8, 35, 80, 30, -8, -1},
+ { -1, -8, 33, 80, 33, -8, -1, 0 }, { -1, -8, 30, 80, 35, -8, -1, 1 },
+ { -1, -8, 28, 80, 37, -7, -2, 1 }, { 0, -8, 26, 79, 39, -7, -2, 1 },
+ { 0, -8, 24, 79, 41, -7, -2, 1 }, { 0, -8, 22, 78, 43, -6, -2, 1 },
+ { 0, -8, 20, 78, 45, -5, -3, 1 }, { 0, -8, 18, 77, 48, -5, -3, 1 },
+ { 0, -8, 16, 76, 50, -4, -3, 1 }, { 0, -8, 15, 75, 52, -3, -4, 1 },
+ { 0, -7, 13, 74, 54, -3, -4, 1 }, { 0, -7, 11, 73, 56, -2, -4, 1 },
+ { 0, -7, 10, 71, 58, -1, -4, 1 }, { 1, -7, 8, 70, 60, 0, -5, 1 },
+ { 1, -6, 6, 68, 62, 1, -5, 1 }, { 1, -6, 5, 67, 63, 2, -5, 1 },
+ { 1, -6, 4, 65, 65, 4, -6, 1 }, { 1, -5, 2, 63, 67, 5, -6, 1 },
+ { 1, -5, 1, 62, 68, 6, -6, 1 }, { 1, -5, 0, 60, 70, 8, -7, 1 },
+ { 1, -4, -1, 58, 71, 10, -7, 0 }, { 1, -4, -2, 56, 73, 11, -7, 0 },
+ { 1, -4, -3, 54, 74, 13, -7, 0 }, { 1, -4, -3, 52, 75, 15, -8, 0 },
+ { 1, -3, -4, 50, 76, 16, -8, 0 }, { 1, -3, -5, 48, 77, 18, -8, 0 },
+ { 1, -3, -5, 45, 78, 20, -8, 0 }, { 1, -2, -6, 43, 78, 22, -8, 0 },
+ { 1, -2, -7, 41, 79, 24, -8, 0 }, { 1, -2, -7, 39, 79, 26, -8, 0 },
+ { 1, -2, -7, 37, 80, 28, -8, -1 }, { 1, -1, -8, 35, 80, 30, -8, -1 },
};
// Filters for interpolation (0.75-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = {
- {2, -11, 25, 96, 25, -11, 2, 0},
- {2, -11, 22, 96, 28, -11, 2, 0},
- {2, -10, 19, 95, 31, -11, 2, 0},
- {2, -10, 17, 95, 34, -12, 2, 0},
- {2, -9, 14, 94, 37, -12, 2, 0},
- {2, -8, 12, 93, 40, -12, 1, 0},
- {2, -8, 9, 92, 43, -12, 1, 1},
- {2, -7, 7, 91, 46, -12, 1, 0},
- {2, -7, 5, 90, 49, -12, 1, 0},
- {2, -6, 3, 88, 52, -12, 0, 1},
- {2, -5, 1, 86, 55, -12, 0, 1},
- {2, -5, -1, 84, 58, -11, 0, 1},
- {2, -4, -2, 82, 61, -11, -1, 1},
- {2, -4, -4, 80, 64, -10, -1, 1},
- {1, -3, -5, 77, 67, -9, -1, 1},
- {1, -3, -6, 75, 70, -8, -2, 1},
- {1, -2, -7, 72, 72, -7, -2, 1},
- {1, -2, -8, 70, 75, -6, -3, 1},
- {1, -1, -9, 67, 77, -5, -3, 1},
- {1, -1, -10, 64, 80, -4, -4, 2},
- {1, -1, -11, 61, 82, -2, -4, 2},
- {1, 0, -11, 58, 84, -1, -5, 2},
- {1, 0, -12, 55, 86, 1, -5, 2},
- {1, 0, -12, 52, 88, 3, -6, 2},
- {0, 1, -12, 49, 90, 5, -7, 2},
- {0, 1, -12, 46, 91, 7, -7, 2},
- {1, 1, -12, 43, 92, 9, -8, 2},
- {0, 1, -12, 40, 93, 12, -8, 2},
- {0, 2, -12, 37, 94, 14, -9, 2},
- {0, 2, -12, 34, 95, 17, -10, 2},
- {0, 2, -11, 31, 95, 19, -10, 2},
- {0, 2, -11, 28, 96, 22, -11, 2}
+ { 2, -11, 25, 96, 25, -11, 2, 0 }, { 2, -11, 22, 96, 28, -11, 2, 0 },
+ { 2, -10, 19, 95, 31, -11, 2, 0 }, { 2, -10, 17, 95, 34, -12, 2, 0 },
+ { 2, -9, 14, 94, 37, -12, 2, 0 }, { 2, -8, 12, 93, 40, -12, 1, 0 },
+ { 2, -8, 9, 92, 43, -12, 1, 1 }, { 2, -7, 7, 91, 46, -12, 1, 0 },
+ { 2, -7, 5, 90, 49, -12, 1, 0 }, { 2, -6, 3, 88, 52, -12, 0, 1 },
+ { 2, -5, 1, 86, 55, -12, 0, 1 }, { 2, -5, -1, 84, 58, -11, 0, 1 },
+ { 2, -4, -2, 82, 61, -11, -1, 1 }, { 2, -4, -4, 80, 64, -10, -1, 1 },
+ { 1, -3, -5, 77, 67, -9, -1, 1 }, { 1, -3, -6, 75, 70, -8, -2, 1 },
+ { 1, -2, -7, 72, 72, -7, -2, 1 }, { 1, -2, -8, 70, 75, -6, -3, 1 },
+ { 1, -1, -9, 67, 77, -5, -3, 1 }, { 1, -1, -10, 64, 80, -4, -4, 2 },
+ { 1, -1, -11, 61, 82, -2, -4, 2 }, { 1, 0, -11, 58, 84, -1, -5, 2 },
+ { 1, 0, -12, 55, 86, 1, -5, 2 }, { 1, 0, -12, 52, 88, 3, -6, 2 },
+ { 0, 1, -12, 49, 90, 5, -7, 2 }, { 0, 1, -12, 46, 91, 7, -7, 2 },
+ { 1, 1, -12, 43, 92, 9, -8, 2 }, { 0, 1, -12, 40, 93, 12, -8, 2 },
+ { 0, 2, -12, 37, 94, 14, -9, 2 }, { 0, 2, -12, 34, 95, 17, -10, 2 },
+ { 0, 2, -11, 31, 95, 19, -10, 2 }, { 0, 2, -11, 28, 96, 22, -11, 2 }
};
// Filters for interpolation (0.875-band) - note this also filters integer pels.
static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = {
- {3, -8, 13, 112, 13, -8, 3, 0},
- {3, -7, 10, 112, 17, -9, 3, -1},
- {2, -6, 7, 111, 21, -9, 3, -1},
- {2, -5, 4, 111, 24, -10, 3, -1},
- {2, -4, 1, 110, 28, -11, 3, -1},
- {1, -3, -1, 108, 32, -12, 4, -1},
- {1, -2, -3, 106, 36, -13, 4, -1},
- {1, -1, -6, 105, 40, -14, 4, -1},
- {1, -1, -7, 102, 44, -14, 4, -1},
- {1, 0, -9, 100, 48, -15, 4, -1},
- {1, 1, -11, 97, 53, -16, 4, -1},
- {0, 1, -12, 95, 57, -16, 4, -1},
- {0, 2, -13, 91, 61, -16, 4, -1},
- {0, 2, -14, 88, 65, -16, 4, -1},
- {0, 3, -15, 84, 69, -17, 4, 0},
- {0, 3, -16, 81, 73, -16, 3, 0},
- {0, 3, -16, 77, 77, -16, 3, 0},
- {0, 3, -16, 73, 81, -16, 3, 0},
- {0, 4, -17, 69, 84, -15, 3, 0},
- {-1, 4, -16, 65, 88, -14, 2, 0},
- {-1, 4, -16, 61, 91, -13, 2, 0},
- {-1, 4, -16, 57, 95, -12, 1, 0},
- {-1, 4, -16, 53, 97, -11, 1, 1},
- {-1, 4, -15, 48, 100, -9, 0, 1},
- {-1, 4, -14, 44, 102, -7, -1, 1},
- {-1, 4, -14, 40, 105, -6, -1, 1},
- {-1, 4, -13, 36, 106, -3, -2, 1},
- {-1, 4, -12, 32, 108, -1, -3, 1},
- {-1, 3, -11, 28, 110, 1, -4, 2},
- {-1, 3, -10, 24, 111, 4, -5, 2},
- {-1, 3, -9, 21, 111, 7, -6, 2},
- {-1, 3, -9, 17, 112, 10, -7, 3}
+ { 3, -8, 13, 112, 13, -8, 3, 0 }, { 3, -7, 10, 112, 17, -9, 3, -1 },
+ { 2, -6, 7, 111, 21, -9, 3, -1 }, { 2, -5, 4, 111, 24, -10, 3, -1 },
+ { 2, -4, 1, 110, 28, -11, 3, -1 }, { 1, -3, -1, 108, 32, -12, 4, -1 },
+ { 1, -2, -3, 106, 36, -13, 4, -1 }, { 1, -1, -6, 105, 40, -14, 4, -1 },
+ { 1, -1, -7, 102, 44, -14, 4, -1 }, { 1, 0, -9, 100, 48, -15, 4, -1 },
+ { 1, 1, -11, 97, 53, -16, 4, -1 }, { 0, 1, -12, 95, 57, -16, 4, -1 },
+ { 0, 2, -13, 91, 61, -16, 4, -1 }, { 0, 2, -14, 88, 65, -16, 4, -1 },
+ { 0, 3, -15, 84, 69, -17, 4, 0 }, { 0, 3, -16, 81, 73, -16, 3, 0 },
+ { 0, 3, -16, 77, 77, -16, 3, 0 }, { 0, 3, -16, 73, 81, -16, 3, 0 },
+ { 0, 4, -17, 69, 84, -15, 3, 0 }, { -1, 4, -16, 65, 88, -14, 2, 0 },
+ { -1, 4, -16, 61, 91, -13, 2, 0 }, { -1, 4, -16, 57, 95, -12, 1, 0 },
+ { -1, 4, -16, 53, 97, -11, 1, 1 }, { -1, 4, -15, 48, 100, -9, 0, 1 },
+ { -1, 4, -14, 44, 102, -7, -1, 1 }, { -1, 4, -14, 40, 105, -6, -1, 1 },
+ { -1, 4, -13, 36, 106, -3, -2, 1 }, { -1, 4, -12, 32, 108, -1, -3, 1 },
+ { -1, 3, -11, 28, 110, 1, -4, 2 }, { -1, 3, -10, 24, 111, 4, -5, 2 },
+ { -1, 3, -9, 21, 111, 7, -6, 2 }, { -1, 3, -9, 17, 112, 10, -7, 3 }
};
// Filters for interpolation (full-band) - no filtering for integer pixels
static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = {
- {0, 0, 0, 128, 0, 0, 0, 0},
- {0, 1, -3, 128, 3, -1, 0, 0},
- {-1, 2, -6, 127, 7, -2, 1, 0},
- {-1, 3, -9, 126, 12, -4, 1, 0},
- {-1, 4, -12, 125, 16, -5, 1, 0},
- {-1, 4, -14, 123, 20, -6, 2, 0},
- {-1, 5, -15, 120, 25, -8, 2, 0},
- {-1, 5, -17, 118, 30, -9, 3, -1},
- {-1, 6, -18, 114, 35, -10, 3, -1},
- {-1, 6, -19, 111, 41, -12, 3, -1},
- {-1, 6, -20, 107, 46, -13, 4, -1},
- {-1, 6, -21, 103, 52, -14, 4, -1},
- {-1, 6, -21, 99, 57, -16, 5, -1},
- {-1, 6, -21, 94, 63, -17, 5, -1},
- {-1, 6, -20, 89, 68, -18, 5, -1},
- {-1, 6, -20, 84, 73, -19, 6, -1},
- {-1, 6, -20, 79, 79, -20, 6, -1},
- {-1, 6, -19, 73, 84, -20, 6, -1},
- {-1, 5, -18, 68, 89, -20, 6, -1},
- {-1, 5, -17, 63, 94, -21, 6, -1},
- {-1, 5, -16, 57, 99, -21, 6, -1},
- {-1, 4, -14, 52, 103, -21, 6, -1},
- {-1, 4, -13, 46, 107, -20, 6, -1},
- {-1, 3, -12, 41, 111, -19, 6, -1},
- {-1, 3, -10, 35, 114, -18, 6, -1},
- {-1, 3, -9, 30, 118, -17, 5, -1},
- {0, 2, -8, 25, 120, -15, 5, -1},
- {0, 2, -6, 20, 123, -14, 4, -1},
- {0, 1, -5, 16, 125, -12, 4, -1},
- {0, 1, -4, 12, 126, -9, 3, -1},
- {0, 1, -2, 7, 127, -6, 2, -1},
- {0, 0, -1, 3, 128, -3, 1, 0}
+ { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 1, -3, 128, 3, -1, 0, 0 },
+ { -1, 2, -6, 127, 7, -2, 1, 0 }, { -1, 3, -9, 126, 12, -4, 1, 0 },
+ { -1, 4, -12, 125, 16, -5, 1, 0 }, { -1, 4, -14, 123, 20, -6, 2, 0 },
+ { -1, 5, -15, 120, 25, -8, 2, 0 }, { -1, 5, -17, 118, 30, -9, 3, -1 },
+ { -1, 6, -18, 114, 35, -10, 3, -1 }, { -1, 6, -19, 111, 41, -12, 3, -1 },
+ { -1, 6, -20, 107, 46, -13, 4, -1 }, { -1, 6, -21, 103, 52, -14, 4, -1 },
+ { -1, 6, -21, 99, 57, -16, 5, -1 }, { -1, 6, -21, 94, 63, -17, 5, -1 },
+ { -1, 6, -20, 89, 68, -18, 5, -1 }, { -1, 6, -20, 84, 73, -19, 6, -1 },
+ { -1, 6, -20, 79, 79, -20, 6, -1 }, { -1, 6, -19, 73, 84, -20, 6, -1 },
+ { -1, 5, -18, 68, 89, -20, 6, -1 }, { -1, 5, -17, 63, 94, -21, 6, -1 },
+ { -1, 5, -16, 57, 99, -21, 6, -1 }, { -1, 4, -14, 52, 103, -21, 6, -1 },
+ { -1, 4, -13, 46, 107, -20, 6, -1 }, { -1, 3, -12, 41, 111, -19, 6, -1 },
+ { -1, 3, -10, 35, 114, -18, 6, -1 }, { -1, 3, -9, 30, 118, -17, 5, -1 },
+ { 0, 2, -8, 25, 120, -15, 5, -1 }, { 0, 2, -6, 20, 123, -14, 4, -1 },
+ { 0, 1, -5, 16, 125, -12, 4, -1 }, { 0, 1, -4, 12, 126, -9, 3, -1 },
+ { 0, 1, -2, 7, 127, -6, 2, -1 }, { 0, 0, -1, 3, 128, -3, 1, 0 }
};
// Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = {56, 12, -3, -1};
-static const int16_t vp10_down2_symodd_half_filter[] = {64, 35, 0, -3};
+static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
int outlength16 = outlength * 16;
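For reference, choose_interp_filter picks one of the five kernel banks above from the output/input length ratio, so stronger downscaling gets a lower-passband (more aggressive anti-aliasing) kernel and the full-band kernels are used when not shrinking. A minimal standalone sketch of that selection; the function name and the exact cutoff constants here are assumptions for illustration, not quoted from choose_interp_filter():

/* Sketch only: return the passband (per mille) of the kernel bank a
 * resampler might select for a given in/out length pair.  Cutoffs are
 * illustrative assumptions, not the encoder's exact constants. */
static int pick_band_permille(int inlength, int outlength) {
  const int out16 = outlength * 16;
  if (out16 >= inlength * 16) return 1000; /* no shrink: full band    */
  if (out16 >= inlength * 13) return 875;  /* mild shrink             */
  if (out16 >= inlength * 11) return 750;
  if (out16 >= inlength * 9) return 625;
  return 500;                              /* heavy shrink: 0.5 band  */
}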
@@ -231,11 +151,14 @@
static void interpolate(const uint8_t *const input, int inlength,
uint8_t *output, int outlength) {
- const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) /
- outlength;
- const int64_t offset = inlength > outlength ?
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
- -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+ const int64_t delta =
+ (((uint64_t)inlength << 32) + outlength / 2) / outlength;
+ const int64_t offset =
+ inlength > outlength
+ ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+ outlength
+ : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+ outlength;
uint8_t *optr = output;
int x, x1, x2, sum, k, int_pel, sub_pel;
int64_t y;
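The delta/offset expressions reshaped above implement a Q32 fixed-point source coordinate: delta is the rounded source step per output pixel, and offset centers the output grid on the input. A minimal standalone sketch of how each output index is then split into an integer pel and one of the 2^SUBPEL_BITS filter phases, matching the (y >> INTERP_PRECISION_BITS) shifts in the loops below; the helper name map_output_sample is hypothetical:

#include <stdint.h>

#define SUBPEL_BITS 5
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)
#define INTERP_PRECISION_BITS 32

/* Map output sample x to a Q32 source coordinate, then split it into
 * the integer pel position and the subpel phase that indexes the
 * 32-entry kernel tables. */
static void map_output_sample(int x, int64_t delta, int64_t offset,
                              int *int_pel, int *sub_pel) {
  const int64_t y = delta * x + offset;
  *int_pel = (int)(y >> INTERP_PRECISION_BITS);
  *sub_pel = (int)(y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK;
}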
@@ -252,8 +175,8 @@
x1 = x;
x = outlength - 1;
y = delta * x + offset;
- while ((y >> INTERP_PRECISION_BITS) +
- (int64_t)(INTERP_TAPS / 2) >= inlength) {
+ while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+ inlength) {
x--;
y -= delta;
}
@@ -267,8 +190,8 @@
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k) {
const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
- sum += filter[k] * input[(pk < 0 ? 0 :
- (pk >= inlength ? inlength - 1 : pk))];
+ sum += filter[k] *
+ input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
}
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
@@ -281,9 +204,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
- 0 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+ ? 0
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
// Middle part.
@@ -305,9 +228,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
- inlength ? inlength - 1 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+ ? inlength - 1
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
}
}
@@ -331,7 +254,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -361,7 +284,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[i - j] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -387,7 +310,7 @@
for (j = 1; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -416,7 +339,7 @@
int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
for (j = 1; j < filter_len_half; ++j) {
sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel(sum);
@@ -426,8 +349,7 @@
static int get_down2_length(int length, int steps) {
int s;
- for (s = 0; s < steps; ++s)
- length = (length + 1) >> 1;
+ for (s = 0; s < steps; ++s) length = (length + 1) >> 1;
return length;
}
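Each down2 step halves the length with round-up, i.e. length becomes ceil(length / 2), so two steps take 1080 to 540 and then 270. A small self-checking sketch, duplicating the one-line loop so the example stands alone:

#include <assert.h>

/* Same halving as get_down2_length() above, repeated here so the
 * example compiles on its own. */
static int down2_length(int length, int steps) {
  int s;
  for (s = 0; s < steps; ++s) length = (length + 1) >> 1;
  return length;
}

int main(void) {
  assert(down2_length(1080, 2) == 270); /* 1080 -> 540 -> 270   */
  assert(down2_length(1081, 1) == 541); /* odd lengths round up */
  return 0;
}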
@@ -441,11 +363,8 @@
return steps;
}
-static void resize_multistep(const uint8_t *const input,
- int length,
- uint8_t *output,
- int olength,
- uint8_t *buf) {
+static void resize_multistep(const uint8_t *const input, int length,
+ uint8_t *output, int olength, uint8_t *buf) {
int steps;
if (length == olength) {
memcpy(output, input, sizeof(output[0]) * length);
@@ -482,8 +401,7 @@
if (filteredlength != olength) {
interpolate(out, filteredlength, output, olength);
}
- if (tmpbuf)
- free(tmpbuf);
+ if (tmpbuf) free(tmpbuf);
} else {
interpolate(input, length, output, olength);
}
@@ -507,26 +425,21 @@
}
}
-void vp10_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride) {
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
+ int out_stride) {
int i;
uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height);
- uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) *
- (width < height ? height : width));
+ uint8_t *tmpbuf =
+ (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width));
uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * (height + height2));
assert(width > 0);
assert(height > 0);
assert(width2 > 0);
assert(height2 > 0);
for (i = 0; i < height; ++i)
- resize_multistep(input + in_stride * i, width,
- intbuf + width2 * i, width2, tmpbuf);
+ resize_multistep(input + in_stride * i, width, intbuf + width2 * i, width2,
+ tmpbuf);
for (i = 0; i < width2; ++i) {
fill_col_to_arr(intbuf + i, width2, height, arrbuf);
resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf);
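vp10_resize_plane is separable: each row is first resized into an intermediate width2-by-height buffer, then each column of that buffer is gathered into a contiguous array (fill_col_to_arr), resized vertically, and written back out. A minimal sketch of the column gather, assuming the obvious strided copy rather than quoting the helper itself; gather_col is a hypothetical name:

#include <stdint.h>

/* Gather one image column into a contiguous array so the 1-D
 * multistep resizer can run on it. */
static void gather_col(const uint8_t *img, int stride, int rows,
                       uint8_t *col) {
  int i;
  for (i = 0; i < rows; ++i) col[i] = img[i * stride];
}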
@@ -542,9 +455,12 @@
uint16_t *output, int outlength, int bd) {
const int64_t delta =
(((uint64_t)inlength << 32) + outlength / 2) / outlength;
- const int64_t offset = inlength > outlength ?
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength :
- -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength;
+ const int64_t offset =
+ inlength > outlength
+ ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) /
+ outlength
+ : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) /
+ outlength;
uint16_t *optr = output;
int x, x1, x2, sum, k, int_pel, sub_pel;
int64_t y;
@@ -561,8 +477,8 @@
x1 = x;
x = outlength - 1;
y = delta * x + offset;
- while ((y >> INTERP_PRECISION_BITS) +
- (int64_t)(INTERP_TAPS / 2) >= inlength) {
+ while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >=
+ inlength) {
x--;
y -= delta;
}
@@ -577,7 +493,7 @@
for (k = 0; k < INTERP_TAPS; ++k) {
const int pk = int_pel - INTERP_TAPS / 2 + 1 + k;
sum += filter[k] *
- input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
+ input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))];
}
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
@@ -590,9 +506,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] *
- input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ?
- 0 : int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0
+ ? 0
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
// Middle part.
@@ -614,9 +530,9 @@
filter = interp_filters[sub_pel];
sum = 0;
for (k = 0; k < INTERP_TAPS; ++k)
- sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >=
- inlength ? inlength - 1 :
- int_pel - INTERP_TAPS / 2 + 1 + k)];
+ sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength
+ ? inlength - 1
+ : int_pel - INTERP_TAPS / 2 + 1 + k)];
*optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
}
}
@@ -640,7 +556,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -670,7 +586,7 @@
for (j = 0; j < filter_len_half; ++j) {
sum += (input[i - j] +
input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -679,7 +595,7 @@
}
static void highbd_down2_symodd(const uint16_t *const input, int length,
- uint16_t *output, int bd) {
+ uint16_t *output, int bd) {
// Actual filter len = 2 * filter_len_half - 1.
static const int16_t *filter = vp10_down2_symodd_half_filter;
const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
@@ -696,7 +612,7 @@
for (j = 1; j < filter_len_half; ++j) {
sum += (input[(i - j < 0 ? 0 : i - j)] +
input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -725,7 +641,7 @@
int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0];
for (j = 1; j < filter_len_half; ++j) {
sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) *
- filter[j];
+ filter[j];
}
sum >>= FILTER_BITS;
*optr++ = clip_pixel_highbd(sum, bd);
@@ -733,12 +649,9 @@
}
}
-static void highbd_resize_multistep(const uint16_t *const input,
- int length,
- uint16_t *output,
- int olength,
- uint16_t *buf,
- int bd) {
+static void highbd_resize_multistep(const uint16_t *const input, int length,
+ uint16_t *output, int olength,
+ uint16_t *buf, int bd) {
int steps;
if (length == olength) {
memcpy(output, input, sizeof(output[0]) * length);
@@ -775,8 +688,7 @@
if (filteredlength != olength) {
highbd_interpolate(out, filteredlength, output, olength, bd);
}
- if (tmpbuf)
- free(tmpbuf);
+ if (tmpbuf) free(tmpbuf);
} else {
highbd_interpolate(input, length, output, olength, bd);
}
@@ -802,19 +714,13 @@
}
}
-void vp10_highbd_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride,
- int bd) {
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd) {
int i;
uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height);
- uint16_t *tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) *
- (width < height ? height : width));
+ uint16_t *tmpbuf =
+ (uint16_t *)malloc(sizeof(uint16_t) * (width < height ? height : width));
uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * (height + height2));
for (i = 0; i < height; ++i) {
highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width,
@@ -833,96 +739,82 @@
}
#endif // CONFIG_VPX_HIGHBITDEPTH
-void vp10_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height / 2, width / 2, uv_stride,
- ou, oheight / 2, owidth / 2, ouv_stride);
- vp10_resize_plane(v, height / 2, width / 2, uv_stride,
- ov, oheight / 2, owidth / 2, ouv_stride);
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride);
+ vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride);
}
void vp10_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width / 2, uv_stride,
- ou, oheight, owidth / 2, ouv_stride);
- vp10_resize_plane(v, height, width / 2, uv_stride,
- ov, oheight, owidth / 2, ouv_stride);
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+ ouv_stride);
+ vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+ ouv_stride);
}
void vp10_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth) {
- vp10_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride);
- vp10_resize_plane(u, height, width, uv_stride,
- ou, oheight, owidth, ouv_stride);
- vp10_resize_plane(v, height, width, uv_stride,
- ov, oheight, owidth, ouv_stride);
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth) {
+ vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+ vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride);
+ vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride);
}
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride,
- ou, oheight / 2, owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride,
- ov, oheight / 2, owidth / 2, ouv_stride, bd);
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+ owidth / 2, ouv_stride, bd);
+ vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+ owidth / 2, ouv_stride, bd);
}
void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width / 2, uv_stride,
- ou, oheight, owidth / 2, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width / 2, uv_stride,
- ov, oheight, owidth / 2, ouv_stride, bd);
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+ owidth / 2, ouv_stride, bd);
+ vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+ owidth / 2, ouv_stride, bd);
}
void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
- const uint8_t *const u, const uint8_t *const v,
- int uv_stride,
- int height, int width,
- uint8_t *oy, int oy_stride,
- uint8_t *ou, uint8_t *ov, int ouv_stride,
- int oheight, int owidth, int bd) {
- vp10_highbd_resize_plane(y, height, width, y_stride,
- oy, oheight, owidth, oy_stride, bd);
- vp10_highbd_resize_plane(u, height, width, uv_stride,
- ou, oheight, owidth, ouv_stride, bd);
- vp10_highbd_resize_plane(v, height, width, uv_stride,
- ov, oheight, owidth, ouv_stride, bd);
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd) {
+ vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+ oy_stride, bd);
+ vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+ ouv_stride, bd);
+ vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+ ouv_stride, bd);
}
#endif // CONFIG_VPX_HIGHBITDEPTH
diff --git a/vp10/encoder/resize.h b/vp10/encoder/resize.h
index a85735b..9dc34ef 100644
--- a/vp10/encoder/resize.h
+++ b/vp10/encoder/resize.h
@@ -18,116 +18,51 @@
extern "C" {
#endif
-void vp10_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride);
-void vp10_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
-void vp10_resize_frame422(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
-void vp10_resize_frame444(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth);
+void vp10_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2, int width2,
+ int out_stride);
+void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
+void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width, uint8_t *oy,
+ int oy_stride, uint8_t *ou, uint8_t *ov,
+ int ouv_stride, int oheight, int owidth);
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input,
- int height,
- int width,
- int in_stride,
- uint8_t *output,
- int height2,
- int width2,
- int out_stride,
- int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y,
- int y_stride,
- const uint8_t *const u,
- const uint8_t *const v,
- int uv_stride,
- int height,
- int width,
- uint8_t *oy,
- int oy_stride,
- uint8_t *ou,
- uint8_t *ov,
- int ouv_stride,
- int oheight,
- int owidth,
- int bd);
-#endif // CONFIG_VPX_HIGHBITDEPTH
+void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+ int in_stride, uint8_t *output, int height2,
+ int width2, int out_stride, int bd);
+void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+ const uint8_t *const u, const uint8_t *const v,
+ int uv_stride, int height, int width,
+ uint8_t *oy, int oy_stride, uint8_t *ou,
+ uint8_t *ov, int ouv_stride, int oheight,
+ int owidth, int bd);
+#endif // CONFIG_VPX_HIGHBITDEPTH
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_ENCODER_RESIZE_H_
+#endif // VP10_ENCODER_RESIZE_H_
diff --git a/vp10/encoder/segmentation.c b/vp10/encoder/segmentation.c
index 677910f..1b29366 100644
--- a/vp10/encoder/segmentation.c
+++ b/vp10/encoder/segmentation.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <limits.h>
#include "vpx_mem/vpx_mem.h"
@@ -32,31 +31,31 @@
seg->update_data = 0;
}
-void vp10_set_segment_data(struct segmentation *seg,
- signed char *feature_data,
- unsigned char abs_delta) {
+void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+ unsigned char abs_delta) {
seg->abs_delta = abs_delta;
memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
}
void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id) {
+ SEG_LVL_FEATURES feature_id) {
seg->feature_mask[segment_id] &= ~(1 << feature_id);
}
void vp10_clear_segdata(struct segmentation *seg, int segment_id,
- SEG_LVL_FEATURES feature_id) {
+ SEG_LVL_FEATURES feature_id) {
seg->feature_data[segment_id][feature_id] = 0;
}
// Based on set of segment counts calculate a probability tree
static void calc_segtree_probs(unsigned *segcounts,
- vpx_prob *segment_tree_probs, const vpx_prob *cur_tree_probs) {
+ vpx_prob *segment_tree_probs,
+ const vpx_prob *cur_tree_probs) {
// Work out probabilities of each segment
- const unsigned cc[4] = {
- segcounts[0] + segcounts[1], segcounts[2] + segcounts[3],
- segcounts[4] + segcounts[5], segcounts[6] + segcounts[7]
- };
+ const unsigned cc[4] = { segcounts[0] + segcounts[1],
+ segcounts[2] + segcounts[3],
+ segcounts[4] + segcounts[5],
+ segcounts[6] + segcounts[7] };
const unsigned ccc[2] = { cc[0] + cc[1], cc[2] + cc[3] };
#if CONFIG_MISC_FIXES
int i;
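The cc/ccc sums above fold the eight leaf segment counts up a balanced binary tree; each internal node's probability is then estimated from its pair of child counts. A standalone sketch of that rollup, using an approximate count-to-probability mapping (the encoder's own helper may clamp or round slightly differently; both function names here are hypothetical):

typedef unsigned char vpx_prob;

/* Approximate count-to-probability mapping, assumed for illustration. */
static vpx_prob binary_prob(unsigned n0, unsigned n1) {
  const unsigned total = n0 + n1;
  return total == 0 ? 128 : (vpx_prob)((255u * n0 + total / 2) / total);
}

/* Roll 8 leaf counts up the tree: probs[0] splits segments 0-3 from
 * 4-7, probs[1]/probs[2] split the quads, probs[3..6] split the pairs. */
static void segtree_probs_sketch(const unsigned segcounts[8],
                                 vpx_prob probs[7]) {
  const unsigned cc[4] = { segcounts[0] + segcounts[1],
                           segcounts[2] + segcounts[3],
                           segcounts[4] + segcounts[5],
                           segcounts[6] + segcounts[7] };
  const unsigned ccc[2] = { cc[0] + cc[1], cc[2] + cc[3] };
  int i;
  probs[0] = binary_prob(ccc[0], ccc[1]);
  probs[1] = binary_prob(cc[0], cc[1]);
  probs[2] = binary_prob(cc[2], cc[3]);
  for (i = 0; i < 4; ++i)
    probs[3 + i] = binary_prob(segcounts[2 * i], segcounts[2 * i + 1]);
}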
@@ -72,13 +71,13 @@
#if CONFIG_MISC_FIXES
for (i = 0; i < 7; i++) {
- const unsigned *ct = i == 0 ? ccc : i < 3 ? cc + (i & 2)
- : segcounts + (i - 3) * 2;
- vp10_prob_diff_update_savings_search(ct,
- cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
+ const unsigned *ct =
+ i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
+ vp10_prob_diff_update_savings_search(
+ ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
}
#else
- (void) cur_tree_probs;
+ (void)cur_tree_probs;
#endif
}
@@ -92,13 +91,11 @@
const int c4567 = c45 + c67;
// Cost the top node of the tree
- int cost = c0123 * vp10_cost_zero(probs[0]) +
- c4567 * vp10_cost_one(probs[0]);
+ int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
// Cost subsequent levels
if (c0123 > 0) {
- cost += c01 * vp10_cost_zero(probs[1]) +
- c23 * vp10_cost_one(probs[1]);
+ cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
if (c01 > 0)
cost += segcounts[0] * vp10_cost_zero(probs[3]) +
@@ -109,8 +106,7 @@
}
if (c4567 > 0) {
- cost += c45 * vp10_cost_zero(probs[2]) +
- c67 * vp10_cost_one(probs[2]);
+ cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
if (c45 > 0)
cost += segcounts[4] * vp10_cost_zero(probs[5]) +
@@ -127,12 +123,11 @@
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
- unsigned *t_unpred_seg_counts,
- int bw, int bh, int mi_row, int mi_col) {
+ unsigned *t_unpred_seg_counts, int bw, int bh,
+ int mi_row, int mi_col) {
int segment_id;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
xd->mi = mi;
segment_id = xd->mi[0]->mbmi.segment_id;
@@ -146,8 +141,8 @@
if (cm->frame_type != KEY_FRAME) {
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
// Test to see if the segment id matches the predicted value.
- const int pred_segment_id = get_segment_id(cm, cm->last_frame_seg_map,
- bsize, mi_row, mi_col);
+ const int pred_segment_id =
+ get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
const int pred_flag = pred_segment_id == segment_id;
const int pred_context = vp10_get_pred_context_seg_id(xd);
@@ -157,8 +152,7 @@
temporal_predictor_count[pred_context][pred_flag]++;
// Update the "unpredicted" segment count
- if (!pred_flag)
- t_unpred_seg_counts[segment_id]++;
+ if (!pred_flag) t_unpred_seg_counts[segment_id]++;
}
}
@@ -166,15 +160,13 @@
const TileInfo *tile, MODE_INFO **mi,
unsigned *no_pred_segcounts,
unsigned (*temporal_predictor_count)[2],
- unsigned *t_unpred_seg_counts,
- int mi_row, int mi_col,
+ unsigned *t_unpred_seg_counts, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
const int mis = cm->mi_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
+ if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return;
bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];
@@ -191,9 +183,9 @@
} else if (bw < bs && bh == bs) {
count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
- count_segs(cm, xd, tile, mi + hbs,
- no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts,
- hbs, bs, mi_row, mi_col + hbs);
+ count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row,
+ mi_col + hbs);
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
@@ -204,9 +196,8 @@
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc],
- no_pred_segcounts, temporal_predictor_count,
- t_unpred_seg_counts,
+ count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
@@ -226,7 +217,7 @@
int i, tile_col, mi_row, mi_col;
#if CONFIG_MISC_FIXES
- unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred;
+ unsigned(*temporal_predictor_count)[2] = cm->counts.seg.pred;
unsigned *no_pred_segcounts = cm->counts.seg.tree_total;
unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred;
#else
@@ -240,7 +231,7 @@
vpx_prob t_nopred_prob[PREDICTION_PROBS];
#if CONFIG_MISC_FIXES
- (void) xd;
+ (void)xd;
#else
// Set default state for the segment tree probabilities and the
// temporal coding probabilities
@@ -262,8 +253,8 @@
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += 8, mi += 8)
count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts,
- temporal_predictor_count, t_unpred_seg_counts,
- mi_row, mi_col, BLOCK_64X64);
+ temporal_predictor_count, t_unpred_seg_counts, mi_row,
+ mi_col, BLOCK_64X64);
}
}
diff --git a/vp10/encoder/segmentation.h b/vp10/encoder/segmentation.h
index b8e6c06..c07f37f 100644
--- a/vp10/encoder/segmentation.h
+++ b/vp10/encoder/segmentation.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_SEGMENTATION_H_
#define VP10_ENCODER_SEGMENTATION_H_
@@ -22,12 +21,10 @@
void vp10_enable_segmentation(struct segmentation *seg);
void vp10_disable_segmentation(struct segmentation *seg);
-void vp10_disable_segfeature(struct segmentation *seg,
- int segment_id,
- SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg,
- int segment_id,
- SEG_LVL_FEATURES feature_id);
+void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id);
+void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+ SEG_LVL_FEATURES feature_id);
// The values given for each segment can be either deltas (from the default
// value chosen for the frame) or absolute values.
@@ -40,7 +37,7 @@
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
// the absolute values given).
void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
- unsigned char abs_delta);
+ unsigned char abs_delta);
void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
diff --git a/vp10/encoder/skin_detection.c b/vp10/encoder/skin_detection.c
index 1714cd3..324ac59 100644
--- a/vp10/encoder/skin_detection.c
+++ b/vp10/encoder/skin_detection.c
@@ -16,9 +16,9 @@
#include "vp10/encoder/skin_detection.h"
// Fixed-point skin color model parameters.
-static const int skin_mean[2] = {7463, 9614}; // q6
-static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
-static const int skin_threshold = 1570636; // q18
+static const int skin_mean[2] = { 7463, 9614 }; // q6
+static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16
+static const int skin_threshold = 1570636; // q18
// Thresholds on luminance.
static const int y_low = 20;
@@ -34,10 +34,9 @@
const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
- const int skin_diff = skin_inv_cov[0] * cb_diff_q2 +
- skin_inv_cov[1] * cbcr_diff_q2 +
- skin_inv_cov[2] * cbcr_diff_q2 +
- skin_inv_cov[3] * cr_diff_q2;
+ const int skin_diff =
+ skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
return skin_diff;
}
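The expression rebuilt above is a fixed-point Mahalanobis-style distance in (Cb, Cr): the q6 means are subtracted, the squared differences land in q12 and are rounded down to q2, and the q16 inverse-covariance weights bring the sum to q18 for comparison against skin_threshold. A self-contained sketch of the full evaluation under that reading; the q12 difference terms and the function name is_skin_sketch are inferred from the scaling comments, not quoted from the file:

/* Sketch: evaluate the skin model for 8-bit chroma averages cb, cr.
 * Returns nonzero when the q18 distance falls below the threshold. */
static int is_skin_sketch(int cb, int cr) {
  static const int skin_mean[2] = { 7463, 9614 };                /* q6  */
  static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; /* q16 */
  static const int skin_threshold = 1570636;                     /* q18 */
  const int cb_q6 = cb << 6, cr_q6 = cr << 6;
  const int cb_d = cb_q6 - skin_mean[0], cr_d = cr_q6 - skin_mean[1];
  const int cb_diff_q2 = (cb_d * cb_d + (1 << 9)) >> 10;   /* q12 -> q2 */
  const int cbcr_diff_q2 = (cb_d * cr_d + (1 << 9)) >> 10;
  const int cr_diff_q2 = (cr_d * cr_d + (1 << 9)) >> 10;
  const int skin_diff =
      skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
      skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
  return skin_diff < skin_threshold;                      /* q18 vs q18 */
}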
@@ -61,11 +60,11 @@
const int src_uvstride = cpi->Source->uv_stride;
YV12_BUFFER_CONFIG skinmap;
memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG));
- if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
- VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) {
- vpx_free_frame_buffer(&skinmap);
- return;
+ if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, cm->subsampling_x,
+ cm->subsampling_y, VPX_ENC_BORDER_IN_PIXELS,
+ cm->byte_alignment)) {
+ vpx_free_frame_buffer(&skinmap);
+ return;
}
memset(skinmap.buffer_alloc, 128, skinmap.frame_size);
y = skinmap.y_buffer;
diff --git a/vp10/encoder/speed_features.c b/vp10/encoder/speed_features.c
index b82dec0..d95dc65 100644
--- a/vp10/encoder/speed_features.c
+++ b/vp10/encoder/speed_features.c
@@ -17,21 +17,23 @@
#include "vpx_dsp/vpx_dsp_common.h"
// Mesh search patterns for various speed settings

-static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] =
- {{64, 4}, {28, 2}, {15, 1}, {7, 1}};
+static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = {
+ { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 }
+};
#define MAX_MESH_SPEED 5 // Max speed setting for mesh motion method
-static MESH_PATTERN good_quality_mesh_patterns[MAX_MESH_SPEED + 1]
- [MAX_MESH_STEP] =
- {{{64, 8}, {28, 4}, {15, 1}, {7, 1}},
- {{64, 8}, {28, 4}, {15, 1}, {7, 1}},
- {{64, 8}, {14, 2}, {7, 1}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
- {{64, 16}, {24, 8}, {12, 4}, {7, 1}},
+static MESH_PATTERN
+ good_quality_mesh_patterns[MAX_MESH_SPEED + 1][MAX_MESH_STEP] = {
+ { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+ { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } },
+ { { 64, 8 }, { 14, 2 }, { 7, 1 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
+ { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } },
};
-static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] =
- {50, 25, 15, 5, 1, 1};
+static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = {
+ 50, 25, 15, 5, 1, 1
+};
// Intra only frames, golden frames (except alt ref overlays) and
// alt ref frames tend to be coded at a higher than ambient quality
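Each MESH_PATTERN pair above reads as (range, interval): one exhaustive stage visits every point of a square window of half-width range around the current best at the given stride, and later stages tighten both numbers (e.g. {64, 4} down to {7, 1} for best quality). A standalone sketch of one stage under that reading, with the callback standing in for the encoder's cost evaluation; the type and function names here are hypothetical:

typedef struct {
  int range;    /* half-width of the square search window */
  int interval; /* stride between visited positions       */
} MESH_PATTERN_SKETCH;

/* Visit every candidate of one mesh stage centered on (best_r, best_c). */
static void mesh_stage(int best_r, int best_c, MESH_PATTERN_SKETCH p,
                       void (*visit)(int r, int c)) {
  int r, c;
  for (r = -p.range; r <= p.range; r += p.interval)
    for (c = -p.range; c <= p.range; c += p.interval)
      visit(best_r + r, best_c + c);
}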
@@ -68,8 +70,8 @@
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->partition_search_breakout_dist_thr = (1 << 23);
} else {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
@@ -79,8 +81,8 @@
if (speed >= 2) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
sf->adaptive_pred_interp_filter = 0;
sf->partition_search_breakout_dist_thr = (1 << 24);
sf->partition_search_breakout_rate_thr = 120;
@@ -141,7 +143,7 @@
sf->use_square_partition_only = !frame_is_intra_only(cm);
}
- sf->less_rectangular_check = 1;
+ sf->less_rectangular_check = 1;
sf->use_rd_breakout = 1;
sf->adaptive_motion_search = 1;
@@ -162,14 +164,14 @@
}
if (speed >= 2) {
- sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_boosted(cpi) ? USE_FULL_RD : USE_LARGESTALL;
- sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
- FLAG_SKIP_INTRA_DIRMISMATCH |
- FLAG_SKIP_INTRA_BESTINTER |
- FLAG_SKIP_COMP_BESTINTRA |
- FLAG_SKIP_INTRA_LOWVAR;
+ sf->mode_search_skip_flags =
+ (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_INTRA_LOWVAR;
sf->disable_filter_search_var_thresh = 100;
sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX;
@@ -178,8 +180,8 @@
if (speed >= 3) {
sf->use_square_partition_only = !frame_is_intra_only(cm);
- sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED;
sf->adaptive_pred_interp_filter = 0;
sf->adaptive_mode_search = 1;
@@ -225,13 +227,14 @@
}
static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
- SPEED_FEATURES *sf, int speed) {
+ SPEED_FEATURES *sf,
+ int speed) {
VP10_COMMON *const cm = &cpi->common;
if (speed >= 1) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
}
@@ -239,8 +242,8 @@
if (speed >= 2) {
if (VPXMIN(cm->width, cm->height) >= 720) {
- sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT
- : DISABLE_ALL_INTER_SPLIT;
+ sf->disable_split_mask =
+ cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT;
} else {
sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY;
}
@@ -255,13 +258,13 @@
}
if (speed >= 7) {
- sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ?
- 800 : 300;
+ sf->encode_breakout_thresh =
+ (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300;
}
}
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
- int speed, vpx_tune_content content) {
+static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
+ vpx_tune_content content) {
VP10_COMMON *const cm = &cpi->common;
const int is_keyframe = cm->frame_type == KEY_FRAME;
const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
@@ -274,8 +277,8 @@
if (speed >= 1) {
sf->use_square_partition_only = !frame_is_intra_only(cm);
sf->less_rectangular_check = 1;
- sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD
- : USE_LARGESTALL;
+ sf->tx_size_search_method =
+ frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL;
sf->use_rd_breakout = 1;
@@ -289,11 +292,11 @@
}
if (speed >= 2) {
- sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 :
- FLAG_SKIP_INTRA_DIRMISMATCH |
- FLAG_SKIP_INTRA_BESTINTER |
- FLAG_SKIP_COMP_BESTINTRA |
- FLAG_SKIP_INTRA_LOWVAR;
+ sf->mode_search_skip_flags =
+ (cm->frame_type == KEY_FRAME) ? 0 : FLAG_SKIP_INTRA_DIRMISMATCH |
+ FLAG_SKIP_INTRA_BESTINTER |
+ FLAG_SKIP_COMP_BESTINTRA |
+ FLAG_SKIP_INTRA_LOWVAR;
sf->adaptive_pred_interp_filter = 2;
sf->disable_filter_search_var_thresh = 50;
sf->comp_inter_joint_search_thresh = BLOCK_SIZES;
@@ -326,8 +329,8 @@
sf->use_fast_coef_costing = 0;
sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX;
sf->adjust_partitioning_from_last_frame =
- cm->last_frame_type != cm->frame_type || (0 ==
- (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
+ cm->last_frame_type != cm->frame_type ||
+ (0 == (frames_since_key + 1) % sf->last_partitioning_redo_frequency);
sf->mv.subpel_force_stop = 1;
for (i = 0; i < TX_SIZES; i++) {
sf->intra_y_mode_mask[i] = INTRA_DC_H_V;
@@ -347,11 +350,12 @@
if (speed >= 5) {
sf->use_quant_fp = !is_keyframe;
- sf->auto_min_max_partition_size = is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX
- : STRICT_NEIGHBORING_MIN_MAX;
+ sf->auto_min_max_partition_size =
+ is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX;
sf->default_max_partition_size = BLOCK_32X32;
sf->default_min_partition_size = BLOCK_8X8;
- sf->force_frame_boost = is_keyframe ||
+ sf->force_frame_boost =
+ is_keyframe ||
(frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1);
sf->max_delta_qindex = is_keyframe ? 20 : 15;
sf->partition_search_type = REFERENCE_PARTITION;
@@ -494,8 +498,7 @@
sf->use_fast_coef_costing = 0;
sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set
sf->schedule_mode_search = 0;
- for (i = 0; i < BLOCK_SIZES; ++i)
- sf->inter_mode_mask[i] = INTER_ALL;
+ for (i = 0; i < BLOCK_SIZES; ++i) sf->inter_mode_mask[i] = INTER_ALL;
sf->max_intra_bsize = BLOCK_64X64;
sf->reuse_inter_pred_sby = 0;
// This setting only takes effect when partition_search_type is set
@@ -541,8 +544,7 @@
sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1;
for (i = 0; i < MAX_MESH_STEP; ++i) {
- sf->mesh_patterns[i].range =
- good_quality_mesh_patterns[speed][i].range;
+ sf->mesh_patterns[i].range = good_quality_mesh_patterns[speed][i].range;
sf->mesh_patterns[i].interval =
good_quality_mesh_patterns[speed][i].interval;
}
@@ -550,8 +552,7 @@
// Slow quant, dct and trellis not worthwhile for first pass
// so make sure they are always turned off.
- if (oxcf->pass == 1)
- sf->optimize_coefficients = 0;
+ if (oxcf->pass == 1) sf->optimize_coefficients = 0;
// No recode for 1 pass.
if (oxcf->pass == 0) {
@@ -566,7 +567,8 @@
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
} else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
- cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_evenmore;
+ cpi->find_fractional_mv_step =
+ vp10_find_best_sub_pixel_tree_pruned_evenmore;
}
x->optimize = sf->optimize_coefficients == 1 && oxcf->pass != 1;
diff --git a/vp10/encoder/speed_features.h b/vp10/encoder/speed_features.h
index 3b91999..72b5847 100644
--- a/vp10/encoder/speed_features.h
+++ b/vp10/encoder/speed_features.h
@@ -18,17 +18,14 @@
#endif
enum {
- INTRA_ALL = (1 << DC_PRED) |
- (1 << V_PRED) | (1 << H_PRED) |
- (1 << D45_PRED) | (1 << D135_PRED) |
- (1 << D117_PRED) | (1 << D153_PRED) |
- (1 << D207_PRED) | (1 << D63_PRED) |
- (1 << TM_PRED),
- INTRA_DC = (1 << DC_PRED),
- INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
- INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
- INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) |
- (1 << H_PRED)
+ INTRA_ALL = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED) | (1 << D45_PRED) |
+ (1 << D135_PRED) | (1 << D117_PRED) | (1 << D153_PRED) |
+ (1 << D207_PRED) | (1 << D63_PRED) | (1 << TM_PRED),
+ INTRA_DC = (1 << DC_PRED),
+ INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED),
+ INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED),
+ INTRA_DC_TM_H_V =
+ (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED)
};
enum {
@@ -42,20 +39,15 @@
};
enum {
- DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) |
- (1 << THR_COMP_LA) |
- (1 << THR_ALTR) |
- (1 << THR_GOLD) |
- (1 << THR_LAST),
+ DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+ (1 << THR_ALTR) | (1 << THR_GOLD) | (1 << THR_LAST),
- DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
+ DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT,
- DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
+ DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA),
- LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) |
- (1 << THR_COMP_LA) |
- (1 << THR_ALTR) |
- (1 << THR_GOLD)
+ LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) |
+ (1 << THR_ALTR) | (1 << THR_GOLD)
};
typedef enum {
diff --git a/vp10/encoder/subexp.c b/vp10/encoder/subexp.c
index d407477..db76a27 100644
--- a/vp10/encoder/subexp.c
+++ b/vp10/encoder/subexp.c
@@ -14,26 +14,29 @@
#include "vp10/encoder/cost.h"
#include "vp10/encoder/subexp.h"
-#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
static const uint8_t update_bits[255] = {
- 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
- 10, 11 - CONFIG_MISC_FIXES,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 0,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11 - CONFIG_MISC_FIXES,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 0,
};
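The 5/6/8/10/11 run lengths in update_bits are the bit costs of the term-subexponential code used for probability deltas: a short bucket prefix plus a fixed-width remainder, with the final bucket coded near-uniformly. A sketch reproducing the table's structure; the bucket boundaries are inferred from the run lengths above, not quoted from the encoder, and the table's trailing 0 entry and the CONFIG_MISC_FIXES tweak at one boundary are left out:

/* Sketch: bit cost of coding a probability delta d (0 <= d < 254)
 * with a term-subexponential code matching the table above. */
static int term_subexp_bits(int d) {
  if (d < 16) return 1 + 4;  /* prefix "0",   4-bit remainder: 5 bits */
  if (d < 32) return 2 + 4;  /* prefix "10",  4-bit remainder: 6 bits */
  if (d < 64) return 3 + 5;  /* prefix "110", 5-bit remainder: 8 bits */
  /* final bucket: 3-bit prefix plus a near-uniform code over the 191
   * remaining values; the first 65 codewords take 7 bits (10 total),
   * the rest take 8 (11 total). */
  return (d - 64 < 65) ? 3 + 7 : 3 + 8;
}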
static int recenter_nonneg(int v, int m) {
@@ -50,23 +53,23 @@
static const uint8_t map_table[MAX_PROB - 1] = {
// generated by:
// map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM);
- 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33,
- 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74,
- 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88,
- 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102,
- 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116,
- 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
- 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
- 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
- 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171,
- 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185,
- 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199,
- 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213,
- 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
- 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
- 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
+ 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19,
};
v--;
m--;
@@ -117,9 +120,8 @@
encode_term_subexp(w, delp);
}
-int vp10_prob_diff_update_savings_search(const unsigned int *ct,
- vpx_prob oldp, vpx_prob *bestp,
- vpx_prob upd) {
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+ vpx_prob *bestp, vpx_prob upd) {
const int old_b = cost_branch256(ct, oldp);
int bestsavings = 0;
vpx_prob newp, bestnewp = oldp;
@@ -139,10 +141,9 @@
}
int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
- const vpx_prob *oldp,
- vpx_prob *bestp,
- vpx_prob upd,
- int stepsize) {
+ const vpx_prob *oldp,
+ vpx_prob *bestp, vpx_prob upd,
+ int stepsize) {
int i, old_b, new_b, update_b, savings, bestsavings, step;
int newp;
vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
@@ -158,15 +159,14 @@
if (*bestp > oldp[PIVOT_NODE]) {
step = -stepsize;
for (newp = *bestp; newp > oldp[PIVOT_NODE]; newp += step) {
- if (newp < 1 || newp > 255)
- continue;
+ if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
vp10_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += cost_branch256(ct + 2 * i, newplist[i]);
new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
- update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
- vp10_cost_upd256;
+ update_b =
+ prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -176,15 +176,14 @@
} else {
step = stepsize;
for (newp = *bestp; newp < oldp[PIVOT_NODE]; newp += step) {
- if (newp < 1 || newp > 255)
- continue;
+ if (newp < 1 || newp > 255) continue;
newplist[PIVOT_NODE] = newp;
vp10_model_to_full_probs(newplist, newplist);
for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
new_b += cost_branch256(ct + 2 * i, newplist[i]);
new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
- update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
- vp10_cost_upd256;
+ update_b =
+ prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
savings = old_b - new_b - update_b;
if (savings > bestsavings) {
bestsavings = savings;
@@ -198,11 +197,11 @@
}
void vp10_cond_prob_diff_update(vpx_writer *w, vpx_prob *oldp,
- const unsigned int ct[2]) {
+ const unsigned int ct[2]) {
const vpx_prob upd = DIFF_UPDATE_PROB;
vpx_prob newp = get_binary_prob(ct[0], ct[1]);
- const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp,
- upd);
+ const int savings =
+ vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
assert(newp >= 1);
if (savings > 0) {
vpx_write(w, 1, upd);
@@ -217,7 +216,7 @@
const unsigned int ct[2]) {
const vpx_prob upd = DIFF_UPDATE_PROB;
vpx_prob newp = get_binary_prob(ct[0], ct[1]);
- const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp,
- upd);
+ const int savings =
+ vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
return savings;
}
diff --git a/vp10/encoder/subexp.h b/vp10/encoder/subexp.h
index 091334f..5adc6e5 100644
--- a/vp10/encoder/subexp.h
+++ b/vp10/encoder/subexp.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef VP10_ENCODER_SUBEXP_H_
#define VP10_ENCODER_SUBEXP_H_
@@ -20,22 +19,19 @@
struct vpx_writer;
-void vp10_write_prob_diff_update(struct vpx_writer *w,
- vpx_prob newp, vpx_prob oldp);
+void vp10_write_prob_diff_update(struct vpx_writer *w, vpx_prob newp,
+ vpx_prob oldp);
void vp10_cond_prob_diff_update(struct vpx_writer *w, vpx_prob *oldp,
- const unsigned int ct[2]);
+ const unsigned int ct[2]);
-int vp10_prob_diff_update_savings_search(const unsigned int *ct,
- vpx_prob oldp, vpx_prob *bestp,
- vpx_prob upd);
-
+int vp10_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp,
+ vpx_prob *bestp, vpx_prob upd);
int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
- const vpx_prob *oldp,
- vpx_prob *bestp,
- vpx_prob upd,
- int stepsize);
+ const vpx_prob *oldp,
+ vpx_prob *bestp, vpx_prob upd,
+ int stepsize);
int vp10_cond_prob_diff_update_savings(vpx_prob *oldp,
const unsigned int ct[2]);
diff --git a/vp10/encoder/temporal_filter.c b/vp10/encoder/temporal_filter.c
index 51c7324..a88564d 100644
--- a/vp10/encoder/temporal_filter.c
+++ b/vp10/encoder/temporal_filter.c
@@ -31,22 +31,14 @@
static int fixed_divide[512];
-static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
- uint8_t *y_mb_ptr,
- uint8_t *u_mb_ptr,
- uint8_t *v_mb_ptr,
- int stride,
- int uv_block_width,
- int uv_block_height,
- int mv_row,
- int mv_col,
- uint8_t *pred,
- struct scale_factors *scale,
- int x, int y) {
+static void temporal_filter_predictors_mb_c(
+ MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr,
+ int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col,
+ uint8_t *pred, struct scale_factors *scale, int x, int y) {
const int which_mv = 0;
const MV mv = { mv_row, mv_col };
const InterpKernel *const kernel =
- vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
+ vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
enum mv_precision mv_precision_uv;
int uv_stride;
@@ -60,74 +52,46 @@
#if CONFIG_VPX_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- vp10_highbd_build_inter_predictor(y_mb_ptr, stride,
- &pred[0], 16,
- &mv,
- scale,
- 16, 16,
- which_mv,
- kernel, MV_PRECISION_Q3, x, y, xd->bd);
+ vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
+ scale, 16, 16, which_mv, kernel,
+ MV_PRECISION_Q3, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride,
- &pred[256], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y, xd->bd);
+ vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+ uv_block_width, &mv, scale,
+ uv_block_width, uv_block_height, which_mv,
+ kernel, mv_precision_uv, x, y, xd->bd);
- vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride,
- &pred[512], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y, xd->bd);
+ vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+ uv_block_width, &mv, scale,
+ uv_block_width, uv_block_height, which_mv,
+ kernel, mv_precision_uv, x, y, xd->bd);
return;
}
#endif // CONFIG_VPX_HIGHBITDEPTH
- vp10_build_inter_predictor(y_mb_ptr, stride,
- &pred[0], 16,
- &mv,
- scale,
- 16, 16,
- which_mv,
- kernel, MV_PRECISION_Q3, x, y);
+ vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+ which_mv, kernel, MV_PRECISION_Q3, x, y);
- vp10_build_inter_predictor(u_mb_ptr, uv_stride,
- &pred[256], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y);
+ vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, kernel, mv_precision_uv, x, y);
- vp10_build_inter_predictor(v_mb_ptr, uv_stride,
- &pred[512], uv_block_width,
- &mv,
- scale,
- uv_block_width, uv_block_height,
- which_mv,
- kernel, mv_precision_uv, x, y);
+ vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+ &mv, scale, uv_block_width, uv_block_height,
+ which_mv, kernel, mv_precision_uv, x, y);
}
void vp10_temporal_filter_init(void) {
int i;
fixed_divide[0] = 0;
- for (i = 1; i < 512; ++i)
- fixed_divide[i] = 0x80000 / i;
+ for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i;
}
-void vp10_temporal_filter_apply_c(uint8_t *frame1,
- unsigned int stride,
- uint8_t *frame2,
- unsigned int block_width,
- unsigned int block_height,
- int strength,
- int filter_weight,
- unsigned int *accumulator,
- uint16_t *count) {
+void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+ uint8_t *frame2, unsigned int block_width,
+ unsigned int block_height, int strength,
+ int filter_weight, unsigned int *accumulator,
+ uint16_t *count) {
unsigned int i, j, k;
int modifier;
int byte = 0;
@@ -138,17 +102,16 @@
int src_byte = frame1[byte];
int pixel_value = *frame2++;
- modifier = src_byte - pixel_value;
+ modifier = src_byte - pixel_value;
// This is an integer approximation of:
// float coeff = (3.0 * modifer * modifier) / pow(2, strength);
// modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
- modifier *= modifier;
- modifier *= 3;
- modifier += rounding;
+ modifier *= modifier;
+ modifier *= 3;
+ modifier += rounding;
modifier >>= strength;
- if (modifier > 16)
- modifier = 16;
+ if (modifier > 16) modifier = 16;
modifier = 16 - modifier;
modifier *= filter_weight;
@@ -164,15 +127,10 @@
}
#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(uint8_t *frame1_8,
- unsigned int stride,
- uint8_t *frame2_8,
- unsigned int block_width,
- unsigned int block_height,
- int strength,
- int filter_weight,
- unsigned int *accumulator,
- uint16_t *count) {
+void vp10_highbd_temporal_filter_apply_c(
+ uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
+ unsigned int block_width, unsigned int block_height, int strength,
+ int filter_weight, unsigned int *accumulator, uint16_t *count) {
uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8);
uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8);
unsigned int i, j, k;
@@ -185,7 +143,7 @@
int src_byte = frame1[byte];
int pixel_value = *frame2++;
- modifier = src_byte - pixel_value;
+ modifier = src_byte - pixel_value;
// This is an integer approximation of:
// float coeff = (3.0 * modifer * modifier) / pow(2, strength);
// modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff);
@@ -194,8 +152,7 @@
modifier += rounding;
modifier >>= strength;
- if (modifier > 16)
- modifier = 16;
+ if (modifier > 16) modifier = 16;
modifier = 16 - modifier;
modifier *= filter_weight;
@@ -225,7 +182,7 @@
unsigned int sse;
int cost_list[5];
- MV best_ref_mv1 = {0, 0};
+ MV best_ref_mv1 = { 0, 0 };
MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
@@ -247,19 +204,15 @@
// Ignore mv costing by sending NULL pointer instead of cost arrays
vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
- cond_cost_list(cpi, cost_list),
- &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv);
+ cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
+ &best_ref_mv1, ref_mv);
// Ignore mv costing by sending NULL pointer instead of cost array
- bestsme = cpi->find_fractional_mv_step(x, ref_mv,
- &best_ref_mv1,
- cpi->common.allow_high_precision_mv,
- x->errorperbit,
- &cpi->fn_ptr[BLOCK_16X16],
- 0, mv_sf->subpel_iters_per_step,
- cond_cost_list(cpi, cost_list),
- NULL, NULL,
- &distortion, &sse, NULL, 0, 0);
+ bestsme = cpi->find_fractional_mv_step(
+ x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv,
+ x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], 0,
+ mv_sf->subpel_iters_per_step, cond_cost_list(cpi, cost_list), NULL, NULL,
+ &distortion, &sse, NULL, 0, 0);
// Restore input state
x->plane[0].src = src;
@@ -270,8 +223,7 @@
static void temporal_filter_iterate_c(VP10_COMP *cpi,
YV12_BUFFER_CONFIG **frames,
- int frame_count,
- int alt_ref_index,
+ int frame_count, int alt_ref_index,
int strength,
struct scale_factors *scale) {
int byte;
@@ -288,17 +240,17 @@
YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
uint8_t *dst1, *dst2;
#if CONFIG_VPX_HIGHBITDEPTH
- DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
- DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
uint8_t *predictor;
#else
- DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
+ DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]);
#endif
const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
- const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
+ const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;
// Save input state
- uint8_t* input_buffer[MAX_MB_PLANE];
+ uint8_t *input_buffer[MAX_MB_PLANE];
int i;
#if CONFIG_VPX_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -308,8 +260,7 @@
}
#endif
- for (i = 0; i < MAX_MB_PLANE; i++)
- input_buffer[i] = mbd->plane[i].pre[0].buf;
+ for (i = 0; i < MAX_MB_PLANE; i++) input_buffer[i] = mbd->plane[i].pre[0].buf;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
// Source frames are extended to 16 pixels. This is different than
@@ -324,8 +275,8 @@
// To keep the mv in play for both Y and UV planes the max that it
// can be on a border is therefore 16 - (2*VPX_INTERP_EXTEND+1).
cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VPX_INTERP_EXTEND));
- cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16)
- + (17 - 2 * VPX_INTERP_EXTEND);
+ cpi->td.mb.mv_row_max =
+ ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
int i, j, k;
@@ -335,15 +286,14 @@
memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));
cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VPX_INTERP_EXTEND));
- cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16)
- + (17 - 2 * VPX_INTERP_EXTEND);
+ cpi->td.mb.mv_col_max =
+ ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VPX_INTERP_EXTEND);
for (frame = 0; frame < frame_count; frame++) {
- const int thresh_low = 10000;
+ const int thresh_low = 10000;
const int thresh_high = 20000;
- if (frames[frame] == NULL)
- continue;
+ if (frames[frame] == NULL) continue;
mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;
@@ -352,84 +302,68 @@
filter_weight = 2;
} else {
// Find best match in this frame by MC
- int err = temporal_filter_find_matching_mb_c(cpi,
- frames[alt_ref_index]->y_buffer + mb_y_offset,
- frames[frame]->y_buffer + mb_y_offset,
- frames[frame]->y_stride);
+ int err = temporal_filter_find_matching_mb_c(
+ cpi, frames[alt_ref_index]->y_buffer + mb_y_offset,
+ frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride);
// Assign higher weight to matching MB if it's error
// score is lower. If not applying MC default behavior
// is to weight all MBs equal.
- filter_weight = err < thresh_low
- ? 2 : err < thresh_high ? 1 : 0;
+ filter_weight = err < thresh_low ? 2 : err < thresh_high ? 1 : 0;
}
if (filter_weight != 0) {
// Construct the predictors
- temporal_filter_predictors_mb_c(mbd,
- frames[frame]->y_buffer + mb_y_offset,
+ temporal_filter_predictors_mb_c(
+ mbd, frames[frame]->y_buffer + mb_y_offset,
frames[frame]->u_buffer + mb_uv_offset,
- frames[frame]->v_buffer + mb_uv_offset,
- frames[frame]->y_stride,
- mb_uv_width, mb_uv_height,
- mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
- mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
- predictor, scale,
+ frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride,
+ mb_uv_width, mb_uv_height, mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
+ mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
mb_col * 16, mb_row * 16);
#if CONFIG_VPX_HIGHBITDEPTH
if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int adj_strength = strength + 2 * (mbd->bd - 8);
// Apply the filter (YUV)
- vp10_highbd_temporal_filter_apply(f->y_buffer + mb_y_offset,
- f->y_stride,
- predictor, 16, 16, adj_strength,
- filter_weight,
- accumulator, count);
- vp10_highbd_temporal_filter_apply(f->u_buffer + mb_uv_offset,
- f->uv_stride, predictor + 256,
- mb_uv_width, mb_uv_height,
- adj_strength,
- filter_weight, accumulator + 256,
- count + 256);
- vp10_highbd_temporal_filter_apply(f->v_buffer + mb_uv_offset,
- f->uv_stride, predictor + 512,
- mb_uv_width, mb_uv_height,
- adj_strength, filter_weight,
- accumulator + 512, count + 512);
+ vp10_highbd_temporal_filter_apply(
+ f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
+ adj_strength, filter_weight, accumulator, count);
+ vp10_highbd_temporal_filter_apply(
+ f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
+ mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+ accumulator + 256, count + 256);
+ vp10_highbd_temporal_filter_apply(
+ f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
+ mb_uv_width, mb_uv_height, adj_strength, filter_weight,
+ accumulator + 512, count + 512);
} else {
// Apply the filter (YUV)
vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16,
- strength, filter_weight,
- accumulator, count);
+ predictor, 16, 16, strength,
+ filter_weight, accumulator, count);
vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
- predictor + 256,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 256,
- count + 256);
+ predictor + 256, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 256, count + 256);
vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
- predictor + 512,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 512,
- count + 512);
+ predictor + 512, mb_uv_width,
+ mb_uv_height, strength, filter_weight,
+ accumulator + 512, count + 512);
}
#else
// Apply the filter (YUV)
vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
- predictor, 16, 16,
- strength, filter_weight,
- accumulator, count);
+ predictor, 16, 16, strength, filter_weight,
+ accumulator, count);
vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
- predictor + 256,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 256,
- count + 256);
+ predictor + 256, mb_uv_width, mb_uv_height,
+ strength, filter_weight, accumulator + 256,
+ count + 256);
vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
- predictor + 512,
- mb_uv_width, mb_uv_height, strength,
- filter_weight, accumulator + 512,
- count + 512);
+ predictor + 512, mb_uv_width, mb_uv_height,
+ strength, filter_weight, accumulator + 512,
+ count + 512);
#endif // CONFIG_VPX_HIGHBITDEPTH
}
}
@@ -584,13 +518,11 @@
}
// Restore input state
- for (i = 0; i < MAX_MB_PLANE; i++)
- mbd->plane[i].pre[0].buf = input_buffer[i];
+ for (i = 0; i < MAX_MB_PLANE; i++) mbd->plane[i].pre[0].buf = input_buffer[i];
}
// Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi,
- int distance, int group_boost,
+static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
int *arnr_frames, int *arnr_strength) {
const VP10EncoderConfig *const oxcf = &cpi->oxcf;
const int frames_after_arf =
@@ -600,34 +532,30 @@
int q, frames, strength;
// Define the forward and backwards filter limits for this arnr group.
- if (frames_fwd > frames_after_arf)
- frames_fwd = frames_after_arf;
- if (frames_fwd > distance)
- frames_fwd = distance;
+ if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf;
+ if (frames_fwd > distance) frames_fwd = distance;
frames_bwd = frames_fwd;
// For even length filter there is one more frame backward
// than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
- if (frames_bwd < distance)
- frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
+ if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1;
// Set the baseline active filter size.
frames = frames_bwd + 1 + frames_fwd;
// Adjust the strength based on active max q.
if (cpi->common.current_video_frame > 1)
- q = ((int)vp10_convert_qindex_to_q(
- cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth));
+ q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+ cpi->common.bit_depth));
else
- q = ((int)vp10_convert_qindex_to_q(
- cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth));
+ q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+ cpi->common.bit_depth));
if (q > 16) {
strength = oxcf->arnr_strength;
} else {
strength = oxcf->arnr_strength - ((16 - q) / 2);
- if (strength < 0)
- strength = 0;
+ if (strength < 0) strength = 0;
}
// Adjust number of frames in filter and strength based on gf boost level.
@@ -661,7 +589,7 @@
int frames_to_blur_backward;
int frames_to_blur_forward;
struct scale_factors sf;
- YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL};
+ YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL };
// Apply context specific adjustments to the arnr filter parameters.
adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength);
@@ -672,28 +600,24 @@
// Setup frame pointers, NULL indicates frame not included in filter.
for (frame = 0; frame < frames_to_blur; ++frame) {
const int which_buffer = start_frame - frame;
- struct lookahead_entry *buf = vp10_lookahead_peek(cpi->lookahead,
- which_buffer);
+ struct lookahead_entry *buf =
+ vp10_lookahead_peek(cpi->lookahead, which_buffer);
frames[frames_to_blur - 1 - frame] = &buf->img;
}
if (frames_to_blur > 0) {
- // Setup scaling factors. Scaling on each of the arnr frames is not
- // supported.
- // ARF is produced at the native frame size and resized when coded.
+// Setup scaling factors. Scaling on each of the arnr frames is not
+// supported.
+// ARF is produced at the native frame size and resized when coded.
#if CONFIG_VPX_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(&sf,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- cpi->common.use_highbitdepth);
+ vp10_setup_scale_factors_for_frame(
+ &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+ frames[0]->y_crop_width, frames[0]->y_crop_height,
+ cpi->common.use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(&sf,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height,
- frames[0]->y_crop_width,
- frames[0]->y_crop_height);
+ vp10_setup_scale_factors_for_frame(
+ &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
+ frames[0]->y_crop_width, frames[0]->y_crop_height);
#endif // CONFIG_VPX_HIGHBITDEPTH
}
diff --git a/vp10/encoder/tokenize.c b/vp10/encoder/tokenize.c
index 46f3b65..60960c6 100644
--- a/vp10/encoder/tokenize.c
+++ b/vp10/encoder/tokenize.c
@@ -25,439 +25,456 @@
#include "vp10/encoder/tokenize.h"
static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
- {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49},
- {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33},
- {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17},
- {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1},
- {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21},
- {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9},
- {8, 7}, {8, 5}, {8, 3}, {8, 1},
- {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1},
- {6, 7}, {6, 5}, {6, 3}, {6, 1}, {5, 3}, {5, 1},
- {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0},
- {1, 0}, {2, 0}, {3, 0}, {4, 0},
- {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6},
- {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14},
- {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12},
- {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24},
- {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2},
- {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16},
- {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28},
- {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40},
- {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52},
- {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62}
+ { 9, 63 }, { 9, 61 }, { 9, 59 }, { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 },
+ { 9, 49 }, { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, { 9, 37 },
+ { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, { 9, 27 }, { 9, 25 }, { 9, 23 },
+ { 9, 21 }, { 9, 19 }, { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 },
+ { 9, 7 }, { 9, 5 }, { 9, 3 }, { 9, 1 }, { 8, 31 }, { 8, 29 }, { 8, 27 },
+ { 8, 25 }, { 8, 23 }, { 8, 21 }, { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 },
+ { 8, 11 }, { 8, 9 }, { 8, 7 }, { 8, 5 }, { 8, 3 }, { 8, 1 }, { 7, 15 },
+ { 7, 13 }, { 7, 11 }, { 7, 9 }, { 7, 7 }, { 7, 5 }, { 7, 3 }, { 7, 1 },
+ { 6, 7 }, { 6, 5 }, { 6, 3 }, { 6, 1 }, { 5, 3 }, { 5, 1 }, { 4, 1 },
+ { 3, 1 }, { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 4, 0 }, { 5, 0 }, { 5, 2 }, { 6, 0 }, { 6, 2 }, { 6, 4 }, { 6, 6 },
+ { 7, 0 }, { 7, 2 }, { 7, 4 }, { 7, 6 }, { 7, 8 }, { 7, 10 }, { 7, 12 },
+ { 7, 14 }, { 8, 0 }, { 8, 2 }, { 8, 4 }, { 8, 6 }, { 8, 8 }, { 8, 10 },
+ { 8, 12 }, { 8, 14 }, { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 },
+ { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 }, { 9, 2 }, { 9, 4 }, { 9, 6 },
+ { 9, 8 }, { 9, 10 }, { 9, 12 }, { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 },
+ { 9, 22 }, { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, { 9, 34 },
+ { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
+ { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
};
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens +
- (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens))
- / 2;
+const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+ dct_cat_lt_10_value_tokens +
+ (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
+ 2;
// Array indices are identical to previously-existing CONTEXT_NODE indices
const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
- -EOB_TOKEN, 2, // 0 = EOB
- -ZERO_TOKEN, 4, // 1 = ZERO
- -ONE_TOKEN, 6, // 2 = ONE
- 8, 12, // 3 = LOW_VAL
- -TWO_TOKEN, 10, // 4 = TWO
- -THREE_TOKEN, -FOUR_TOKEN, // 5 = THREE
- 14, 16, // 6 = HIGH_LOW
- -CATEGORY1_TOKEN, -CATEGORY2_TOKEN, // 7 = CAT_ONE
- 18, 20, // 8 = CAT_THREEFOUR
- -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 9 = CAT_THREE
- -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 10 = CAT_FIVE
+ -EOB_TOKEN,
+ 2, // 0 = EOB
+ -ZERO_TOKEN,
+ 4, // 1 = ZERO
+ -ONE_TOKEN,
+ 6, // 2 = ONE
+ 8,
+ 12, // 3 = LOW_VAL
+ -TWO_TOKEN,
+ 10, // 4 = TWO
+ -THREE_TOKEN,
+ -FOUR_TOKEN, // 5 = THREE
+ 14,
+ 16, // 6 = HIGH_LOW
+ -CATEGORY1_TOKEN,
+ -CATEGORY2_TOKEN, // 7 = CAT_ONE
+ 18,
+ 20, // 8 = CAT_THREEFOUR
+ -CATEGORY3_TOKEN,
+ -CATEGORY4_TOKEN, // 9 = CAT_THREE
+ -CATEGORY5_TOKEN,
+ -CATEGORY6_TOKEN // 10 = CAT_FIVE
};
-static const vpx_tree_index cat1[2] = {0, 0};
-static const vpx_tree_index cat2[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6[28] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12,
- 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 0, 0};
+static const vpx_tree_index cat1[2] = { 0, 0 };
+static const vpx_tree_index cat2[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6[28] = { 2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
+ 12, 12, 14, 14, 16, 16, 18, 18, 20, 20,
+ 22, 22, 24, 24, 26, 26, 0, 0 };
-static const int16_t zero_cost[] = {0};
-static const int16_t sign_cost[] = {255, 257};
-static const int16_t cat1_cost[] = {429, 431, 616, 618};
-static const int16_t cat2_cost[] = {624, 626, 727, 729, 848, 850, 951, 953};
-static const int16_t cat3_cost[] = {
- 820, 822, 893, 895, 940, 942, 1013, 1015, 1096, 1098, 1169, 1171, 1216, 1218,
- 1289, 1291
-};
-static const int16_t cat4_cost[] = {
- 1032, 1034, 1075, 1077, 1105, 1107, 1148, 1150, 1194, 1196, 1237, 1239,
- 1267, 1269, 1310, 1312, 1328, 1330, 1371, 1373, 1401, 1403, 1444, 1446,
- 1490, 1492, 1533, 1535, 1563, 1565, 1606, 1608
-};
+static const int16_t zero_cost[] = { 0 };
+static const int16_t sign_cost[] = { 255, 257 };
+static const int16_t cat1_cost[] = { 429, 431, 616, 618 };
+static const int16_t cat2_cost[] = { 624, 626, 727, 729, 848, 850, 951, 953 };
+static const int16_t cat3_cost[] = { 820, 822, 893, 895, 940, 942,
+ 1013, 1015, 1096, 1098, 1169, 1171,
+ 1216, 1218, 1289, 1291 };
+static const int16_t cat4_cost[] = { 1032, 1034, 1075, 1077, 1105, 1107, 1148,
+ 1150, 1194, 1196, 1237, 1239, 1267, 1269,
+ 1310, 1312, 1328, 1330, 1371, 1373, 1401,
+ 1403, 1444, 1446, 1490, 1492, 1533, 1535,
+ 1563, 1565, 1606, 1608 };
static const int16_t cat5_cost[] = {
- 1269, 1271, 1283, 1285, 1306, 1308, 1320,
- 1322, 1347, 1349, 1361, 1363, 1384, 1386, 1398, 1400, 1443, 1445, 1457,
- 1459, 1480, 1482, 1494, 1496, 1521, 1523, 1535, 1537, 1558, 1560, 1572,
- 1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643, 1645, 1670, 1672, 1684,
- 1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782, 1803, 1805, 1817,
- 1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897
+ 1269, 1271, 1283, 1285, 1306, 1308, 1320, 1322, 1347, 1349, 1361, 1363, 1384,
+ 1386, 1398, 1400, 1443, 1445, 1457, 1459, 1480, 1482, 1494, 1496, 1521, 1523,
+ 1535, 1537, 1558, 1560, 1572, 1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643,
+ 1645, 1670, 1672, 1684, 1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782,
+ 1803, 1805, 1817, 1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897
};
const int16_t vp10_cat6_low_cost[256] = {
- 1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662,
- 1670, 1672, 1678, 1680, 1684, 1686, 1692, 1694, 1711, 1713, 1719, 1721,
- 1725, 1727, 1733, 1735, 1743, 1745, 1751, 1753, 1757, 1759, 1765, 1767,
- 1787, 1789, 1795, 1797, 1801, 1803, 1809, 1811, 1819, 1821, 1827, 1829,
- 1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870, 1874, 1876, 1882, 1884,
- 1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940, 1942, 1948, 1950,
- 1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988, 1994, 1996,
- 2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053, 2055,
- 2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113,
- 2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172,
- 2176, 2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218,
- 2082, 2084, 2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124,
- 2128, 2130, 2136, 2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179,
- 2187, 2189, 2195, 2197, 2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241,
- 2245, 2247, 2253, 2255, 2263, 2265, 2271, 2273, 2277, 2279, 2285, 2287,
- 2304, 2306, 2312, 2314, 2318, 2320, 2326, 2328, 2336, 2338, 2344, 2346,
- 2350, 2352, 2358, 2360, 2384, 2386, 2392, 2394, 2398, 2400, 2406, 2408,
- 2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440, 2457, 2459, 2465, 2467,
- 2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503, 2505, 2511, 2513,
- 2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567, 2573, 2575,
- 2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628, 2630,
- 2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662
+ 1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662, 1670, 1672, 1678, 1680, 1684,
+ 1686, 1692, 1694, 1711, 1713, 1719, 1721, 1725, 1727, 1733, 1735, 1743, 1745,
+ 1751, 1753, 1757, 1759, 1765, 1767, 1787, 1789, 1795, 1797, 1801, 1803, 1809,
+ 1811, 1819, 1821, 1827, 1829, 1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870,
+ 1874, 1876, 1882, 1884, 1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940,
+ 1942, 1948, 1950, 1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988,
+ 1994, 1996, 2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053,
+ 2055, 2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113,
+ 2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172, 2176,
+ 2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218, 2082, 2084,
+ 2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124, 2128, 2130, 2136,
+ 2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179, 2187, 2189, 2195, 2197,
+ 2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241, 2245, 2247, 2253, 2255, 2263,
+ 2265, 2271, 2273, 2277, 2279, 2285, 2287, 2304, 2306, 2312, 2314, 2318, 2320,
+ 2326, 2328, 2336, 2338, 2344, 2346, 2350, 2352, 2358, 2360, 2384, 2386, 2392,
+ 2394, 2398, 2400, 2406, 2408, 2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440,
+ 2457, 2459, 2465, 2467, 2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503,
+ 2505, 2511, 2513, 2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567,
+ 2573, 2575, 2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628,
+ 2630, 2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662
};
const int16_t vp10_cat6_high_cost[128] = {
- 72, 892, 1183, 2003, 1448, 2268, 2559, 3379,
- 1709, 2529, 2820, 3640, 3085, 3905, 4196, 5016, 2118, 2938, 3229, 4049,
- 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
- 2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686,
- 5131, 5951, 6242, 7062, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
- 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 2118, 2938, 3229, 4049,
- 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
- 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732,
- 7177, 7997, 8288, 9108, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
- 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 6210, 7030, 7321, 8141,
- 7586, 8406, 8697, 9517, 7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154
+ 72, 892, 1183, 2003, 1448, 2268, 2559, 3379, 1709, 2529, 2820, 3640,
+ 3085, 3905, 4196, 5016, 2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425,
+ 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062, 2118, 2938, 3229, 4049,
+ 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
+ 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732,
+ 7177, 7997, 8288, 9108, 2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425,
+ 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062, 4164, 4984, 5275, 6095,
+ 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108,
+ 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732,
+ 7177, 7997, 8288, 9108, 6210, 7030, 7321, 8141, 7586, 8406, 8697, 9517,
+ 7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154
};
#if CONFIG_VPX_HIGHBITDEPTH
const int16_t vp10_cat6_high10_high_cost[512] = {
- 74, 894, 1185, 2005, 1450, 2270, 2561,
- 3381, 1711, 2531, 2822, 3642, 3087, 3907, 4198, 5018, 2120, 2940, 3231,
- 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
- 7064, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
- 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
- 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 2120, 2940, 3231,
- 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
- 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914,
- 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
- 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
- 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
- 11156, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
- 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
- 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277,
- 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
- 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
- 9780, 9225, 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
- 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
- 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
- 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
- 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
- 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 2120,
- 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133,
- 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
- 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542,
- 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212,
- 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
- 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
- 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588,
- 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 6212,
- 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
- 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565,
- 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166, 4986, 5277,
- 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
- 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
- 9780, 9225, 10045, 10336, 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699,
- 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369,
- 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091,
- 12382, 13202, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669,
- 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
- 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258,
- 9078, 9369, 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826,
- 11271, 12091, 12382, 13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791,
- 13611, 11941, 12761, 13052, 13872, 13317, 14137, 14428, 15248,
+ 74, 894, 1185, 2005, 1450, 2270, 2561, 3381, 1711, 2531, 2822,
+ 3642, 3087, 3907, 4198, 5018, 2120, 2940, 3231, 4051, 3496, 4316,
+ 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244, 7064, 2120,
+ 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688,
+ 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
+ 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 2120, 2940,
+ 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133,
+ 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473,
+ 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277,
+ 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999,
+ 8290, 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849,
+ 8669, 8960, 9780, 9225, 10045, 10336, 11156, 2120, 2940, 3231, 4051,
+ 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
+ 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623,
+ 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542,
+ 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110,
+ 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
+ 9780, 9225, 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362,
+ 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212,
+ 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780,
+ 9225, 10045, 10336, 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699,
+ 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078,
+ 9369, 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271,
+ 12091, 12382, 13202, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427,
+ 3757, 4577, 4868, 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277,
+ 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999,
+ 8290, 9110, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
+ 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143,
+ 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
+ 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623,
+ 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588,
+ 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156,
+ 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
+ 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
+ 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166,
+ 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734,
+ 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699,
+ 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 6212, 7032,
+ 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
+ 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565,
+ 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 6212, 7032, 7323,
+ 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045,
+ 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565, 9895,
+ 10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258, 9078, 9369, 10189,
+ 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382,
+ 13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791, 13611, 11941, 12761,
+ 13052, 13872, 13317, 14137, 14428, 15248,
};
const int16_t vp10_cat6_high12_high_cost[2048] = {
- 76, 896, 1187, 2007, 1452, 2272, 2563,
- 3383, 1713, 2533, 2824, 3644, 3089, 3909, 4200, 5020, 2122, 2942, 3233,
- 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
- 7066, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
- 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
- 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 2122, 2942, 3233,
- 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
- 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
- 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
- 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
- 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
- 11158, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
- 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
- 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279,
- 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
- 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
- 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
- 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
- 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
- 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
- 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
- 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 2122,
- 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135,
- 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
- 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544,
- 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214,
- 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
- 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
- 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590,
- 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214,
- 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
- 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
- 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279,
- 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
- 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
- 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
- 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
- 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
- 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
- 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
- 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
- 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
- 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
- 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 2122, 2942,
- 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955,
- 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
- 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364,
- 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034,
- 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
- 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
- 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
- 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
- 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
- 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
- 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279, 6099,
- 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112,
- 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
- 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
- 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191,
- 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
- 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
- 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
- 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
- 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
- 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
- 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168, 4988,
- 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
- 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
- 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410,
- 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080,
- 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
- 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
- 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
- 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
- 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
- 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
- 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 6214,
- 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
- 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
- 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371,
- 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
- 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
- 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191,
- 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+ 76, 896, 1187, 2007, 1452, 2272, 2563, 3383, 1713, 2533, 2824,
+ 3644, 3089, 3909, 4200, 5020, 2122, 2942, 3233, 4053, 3498, 4318,
+ 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 2122,
+ 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690,
+ 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 2122, 2942,
+ 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135,
+ 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475,
+ 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
+ 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
+ 8671, 8962, 9782, 9227, 10047, 10338, 11158, 2122, 2942, 3233, 4053,
+ 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
+ 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
+ 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544,
+ 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364,
+ 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
+ 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080,
+ 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
+ 12093, 12384, 13204, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429,
+ 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
+ 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
+ 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
+ 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168,
+ 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736,
+ 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191,
+ 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
- 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682,
- 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
- 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
- 15920, 15365, 16185, 16476, 17296, 2122, 2942, 3233, 4053, 3498, 4318, 4609,
- 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279,
- 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
- 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
- 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
- 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279,
- 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
- 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
- 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
- 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
- 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
- 12384, 13204, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
- 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
- 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
- 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
- 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
- 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145,
- 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
- 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
- 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456,
- 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
- 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
- 13319, 14139, 14430, 15250, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475,
- 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145,
- 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
- 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
- 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
- 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034,
- 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
- 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
- 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191,
- 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
- 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
- 13054, 13874, 13319, 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590,
- 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
- 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
- 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
- 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
+ 13054, 13874, 13319, 14139, 14430, 15250, 2122, 2942, 3233, 4053, 3498,
+ 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066,
+ 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
+ 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364,
+ 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
+ 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
+ 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
+ 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
+ 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168, 4988,
+ 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181,
+ 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
+ 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+ 13874, 13319, 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
- 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
- 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
+ 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
- 17296, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
- 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
- 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325,
- 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
- 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
- 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590,
- 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
- 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
- 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
- 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
- 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
- 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
- 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
- 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
- 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
- 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
- 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260,
- 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
- 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
- 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126,
- 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
- 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659,
- 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214, 7034, 7325,
- 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
- 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
- 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636,
- 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
- 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
- 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456,
- 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
+ 17296, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579,
+ 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544,
+ 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112,
+ 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
+ 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 4168,
+ 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736,
+ 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
+ 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
+ 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
+ 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
+ 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
- 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13319, 14139, 14430, 15250, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
+ 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
+ 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191,
+ 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+ 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682,
+ 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080,
+ 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
+ 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613,
+ 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417,
+ 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
+ 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989,
+ 14809, 15100, 15920, 15365, 16185, 16476, 17296, 4168, 4988, 5279, 6099,
+ 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
+ 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
+ 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
+ 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
+ 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
+ 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
+ 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
+ 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237,
+ 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430,
+ 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682,
+ 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
+ 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+ 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548,
+ 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
+ 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080,
+ 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
+ 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613,
+ 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
+ 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237,
+ 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430,
+ 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809,
+ 15100, 15920, 15365, 16185, 16476, 17296, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+ 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502,
+ 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352,
+ 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920,
+ 15365, 16185, 16476, 17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172,
13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365,
- 16185, 16476, 17296, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
- 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
- 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
- 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
- 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
- 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
- 17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
- 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728,
- 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296,
- 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
- 15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509, 16329, 15774, 16594,
- 16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231, 18522, 19342
+ 16185, 16476, 17296, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659,
+ 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509,
+ 16329, 15774, 16594, 16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231,
+ 18522, 19342
};
#endif
#if CONFIG_VPX_HIGHBITDEPTH
-static const vpx_tree_index cat1_high10[2] = {0, 0};
-static const vpx_tree_index cat2_high10[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3_high10[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4_high10[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5_high10[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6_high10[32] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
- 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
- 30, 30, 0, 0};
-static const vpx_tree_index cat1_high12[2] = {0, 0};
-static const vpx_tree_index cat2_high12[4] = {2, 2, 0, 0};
-static const vpx_tree_index cat3_high12[6] = {2, 2, 4, 4, 0, 0};
-static const vpx_tree_index cat4_high12[8] = {2, 2, 4, 4, 6, 6, 0, 0};
-static const vpx_tree_index cat5_high12[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
-static const vpx_tree_index cat6_high12[36] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
- 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
- 30, 30, 32, 32, 34, 34, 0, 0};
+static const vpx_tree_index cat1_high10[2] = { 0, 0 };
+static const vpx_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high10[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high10[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high10[32] = { 2, 2, 4, 4, 6, 6, 8, 8,
+ 10, 10, 12, 12, 14, 14, 16, 16,
+ 18, 18, 20, 20, 22, 22, 24, 24,
+ 26, 26, 28, 28, 30, 30, 0, 0 };
+static const vpx_tree_index cat1_high12[2] = { 0, 0 };
+static const vpx_tree_index cat2_high12[4] = { 2, 2, 0, 0 };
+static const vpx_tree_index cat3_high12[6] = { 2, 2, 4, 4, 0, 0 };
+static const vpx_tree_index cat4_high12[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
+static const vpx_tree_index cat5_high12[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
+static const vpx_tree_index cat6_high12[36] = {
+ 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14, 16, 16, 18, 18,
+ 20, 20, 22, 22, 24, 24, 26, 26, 28, 28, 30, 30, 32, 32, 34, 34, 0, 0
+};
#endif
const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
- {0, 0, 0, 0, zero_cost}, // ZERO_TOKEN
- {0, 0, 0, 1, sign_cost}, // ONE_TOKEN
- {0, 0, 0, 2, sign_cost}, // TWO_TOKEN
- {0, 0, 0, 3, sign_cost}, // THREE_TOKEN
- {0, 0, 0, 4, sign_cost}, // FOUR_TOKEN
- {cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CATEGORY1_TOKEN
- {cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CATEGORY2_TOKEN
- {cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CATEGORY3_TOKEN
- {cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CATEGORY4_TOKEN
- {cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CATEGORY5_TOKEN
- {cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0}, // CATEGORY6_TOKEN
- {0, 0, 0, 0, zero_cost} // EOB_TOKEN
+ { 0, 0, 0, 0, zero_cost }, // ZERO_TOKEN
+ { 0, 0, 0, 1, sign_cost }, // ONE_TOKEN
+ { 0, 0, 0, 2, sign_cost }, // TWO_TOKEN
+ { 0, 0, 0, 3, sign_cost }, // THREE_TOKEN
+ { 0, 0, 0, 4, sign_cost }, // FOUR_TOKEN
+ { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN
+ { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN
+ { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN
+ { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN
+ { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN
+ { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN
+ { 0, 0, 0, 0, zero_cost } // EOB_TOKEN
};
#if CONFIG_VPX_HIGHBITDEPTH
const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
- {0, 0, 0, 0, zero_cost}, // ZERO
- {0, 0, 0, 1, sign_cost}, // ONE
- {0, 0, 0, 2, sign_cost}, // TWO
- {0, 0, 0, 3, sign_cost}, // THREE
- {0, 0, 0, 4, sign_cost}, // FOUR
- {cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
- {cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
- {cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
- {cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
- {cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
- {cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0}, // CAT6
- {0, 0, 0, 0, zero_cost} // EOB
+ { 0, 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 0, 4, sign_cost }, // FOUR
+ { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, 0, zero_cost } // EOB
};
const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
- {0, 0, 0, 0, zero_cost}, // ZERO
- {0, 0, 0, 1, sign_cost}, // ONE
- {0, 0, 0, 2, sign_cost}, // TWO
- {0, 0, 0, 3, sign_cost}, // THREE
- {0, 0, 0, 4, sign_cost}, // FOUR
- {cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
- {cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
- {cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
- {cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
- {cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
- {cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0}, // CAT6
- {0, 0, 0, 0, zero_cost} // EOB
+ { 0, 0, 0, 0, zero_cost }, // ZERO
+ { 0, 0, 0, 1, sign_cost }, // ONE
+ { 0, 0, 0, 2, sign_cost }, // TWO
+ { 0, 0, 0, 3, sign_cost }, // THREE
+ { 0, 0, 0, 4, sign_cost }, // FOUR
+ { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1
+ { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2
+ { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3
+ { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4
+ { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5
+ { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6
+ { 0, 0, 0, 0, zero_cost } // EOB
};
#endif
const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
- {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7},
- {125, 7}, {126, 7}, {127, 7}, {0, 1}
+ { 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 },
+ { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
};
-
struct tokenize_b_args {
VP10_COMP *cpi;
ThreadData *td;
TOKENEXTRA **tp;
};
-static void set_entropy_context_b(int plane, int block,
- int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
+static void set_entropy_context_b(int plane, int block, int blk_row,
+ int blk_col, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg) {
- struct tokenize_b_args* const args = arg;
+ struct tokenize_b_args *const args = arg;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *p = &x->plane[plane];
struct macroblockd_plane *pd = &xd->plane[plane];
- vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0,
- blk_col, blk_row);
+ vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+ blk_row);
}
static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree,
int32_t extra, uint8_t token,
- uint8_t skip_eob_node,
- unsigned int *counts) {
+ uint8_t skip_eob_node, unsigned int *counts) {
(*t)->token = token;
(*t)->extra = extra;
(*t)->context_tree = context_tree;
@@ -468,8 +485,7 @@
static INLINE void add_token_no_extra(TOKENEXTRA **t,
const vpx_prob *context_tree,
- uint8_t token,
- uint8_t skip_eob_node,
+ uint8_t token, uint8_t skip_eob_node,
unsigned int *counts) {
(*t)->token = token;
(*t)->context_tree = context_tree;
@@ -485,9 +501,8 @@
}
static void tokenize_b(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, void *arg) {
- struct tokenize_b_args* const args = arg;
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
+ struct tokenize_b_args *const args = arg;
VP10_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
@@ -499,7 +514,7 @@
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
int pt; /* near block/prev token context index */
int c;
- TOKENEXTRA *t = *tp; /* store tokens starting here */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
int eob = p->eobs[block];
const PLANE_TYPE type = pd->plane_type;
const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block);
@@ -567,8 +582,7 @@
int *skippable;
};
static void is_skippable(int plane, int block, int blk_row, int blk_col,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- void *argv) {
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) {
struct is_skippable_args *args = argv;
(void)plane;
(void)plane_bsize;
@@ -582,9 +596,9 @@
// vp10_foreach_transform_block() and simplify is_skippable().
int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
- struct is_skippable_args args = {x->plane[plane].eobs, &result};
+ struct is_skippable_args args = { x->plane[plane].eobs, &result };
vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
- &args);
+ &args);
return result;
}
@@ -593,35 +607,34 @@
void *argv) {
struct is_skippable_args *args = argv;
int eobs = (tx_size == TX_4X4) ? 3 : 10;
- (void) plane;
- (void) plane_bsize;
- (void) blk_row;
- (void) blk_col;
+ (void)plane;
+ (void)plane_bsize;
+ (void)blk_row;
+ (void)blk_col;
*(args->skippable) |= (args->eobs[block] > eobs);
}
int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 0;
- struct is_skippable_args args = {x->plane[plane].eobs, &result};
+ struct is_skippable_args args = { x->plane[plane].eobs, &result };
vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
- has_high_freq_coeff, &args);
+ has_high_freq_coeff, &args);
return result;
}
void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
- int dry_run, BLOCK_SIZE bsize) {
+ int dry_run, BLOCK_SIZE bsize) {
VP10_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int ctx = vp10_get_skip_context(xd);
- const int skip_inc = !segfeature_active(&cm->seg, mbmi->segment_id,
- SEG_LVL_SKIP);
- struct tokenize_b_args arg = {cpi, td, t};
+ const int skip_inc =
+ !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
+ struct tokenize_b_args arg = { cpi, td, t };
if (mbmi->skip) {
- if (!dry_run)
- td->counts->skip[ctx][1] += skip_inc;
+ if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
reset_skip_context(xd, bsize);
return;
}
diff --git a/vp10/encoder/tokenize.h b/vp10/encoder/tokenize.h
index 536a55c..0a02048 100644
--- a/vp10/encoder/tokenize.h
+++ b/vp10/encoder/tokenize.h
@@ -20,15 +20,14 @@
extern "C" {
#endif
-#define EOSB_TOKEN 127 // Not signalled, encoder only
+#define EOSB_TOKEN 127 // Not signalled, encoder only
#if CONFIG_VPX_HIGHBITDEPTH
- typedef int32_t EXTRABIT;
+typedef int32_t EXTRABIT;
#else
- typedef int16_t EXTRABIT;
+typedef int16_t EXTRABIT;
#endif
-
typedef struct {
int16_t token;
EXTRABIT extra;
@@ -52,7 +51,7 @@
struct ThreadData;
void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
- TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
+ TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
extern const int16_t *vp10_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
@@ -66,27 +65,26 @@
extern const int16_t vp10_cat6_high10_high_cost[512];
extern const int16_t vp10_cat6_high12_high_cost[2048];
static INLINE int16_t vp10_get_cost(int16_t token, EXTRABIT extrabits,
- const int16_t *cat6_high_table) {
- if (token != CATEGORY6_TOKEN)
- return vp10_extra_bits[token].cost[extrabits];
- return vp10_cat6_low_cost[extrabits & 0xff]
- + cat6_high_table[extrabits >> 8];
+ const int16_t *cat6_high_table) {
+ if (token != CATEGORY6_TOKEN) return vp10_extra_bits[token].cost[extrabits];
+ return vp10_cat6_low_cost[extrabits & 0xff] + cat6_high_table[extrabits >> 8];
}
#if CONFIG_VPX_HIGHBITDEPTH
-static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int16_t *vp10_get_high_cost_table(int bit_depth) {
return bit_depth == 8 ? vp10_cat6_high_cost
- : (bit_depth == 10 ? vp10_cat6_high10_high_cost :
- vp10_cat6_high12_high_cost);
+ : (bit_depth == 10 ? vp10_cat6_high10_high_cost
+ : vp10_cat6_high12_high_cost);
}
#else
-static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) {
- (void) bit_depth;
+static INLINE const int16_t *vp10_get_high_cost_table(int bit_depth) {
+ (void)bit_depth;
return vp10_cat6_high_cost;
}
#endif // CONFIG_VPX_HIGHBITDEPTH
-static INLINE void vp10_get_token_extra(int v, int16_t *token, EXTRABIT *extra) {
+static INLINE void vp10_get_token_extra(int v, int16_t *token,
+ EXTRABIT *extra) {
if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
*token = CATEGORY6_TOKEN;
if (v >= CAT6_MIN_VAL)
@@ -99,12 +97,10 @@
*extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
}
static INLINE int16_t vp10_get_token(int v) {
- if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL)
- return 10;
+ if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
return vp10_dct_cat_lt_10_value_tokens[v].token;
}
-
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/vp10/encoder/treewriter.c b/vp10/encoder/treewriter.c
index 1f42f32..152bf40 100644
--- a/vp10/encoder/treewriter.c
+++ b/vp10/encoder/treewriter.c
@@ -27,7 +27,7 @@
}
void vp10_tokens_from_tree(struct vp10_token *tokens,
- const vpx_tree_index *tree) {
+ const vpx_tree_index *tree) {
tree2tok(tokens, tree, 0, 0, 0);
}
@@ -52,7 +52,7 @@
}
void vp10_tree_probs_from_distribution(vpx_tree tree,
- unsigned int branch_ct[/* n-1 */][2],
- const unsigned int num_events[/* n */]) {
+ unsigned int branch_ct[/* n-1 */][2],
+ const unsigned int num_events[/* n */]) {
convert_distribution(0, tree, branch_ct, num_events);
}
diff --git a/vp10/encoder/treewriter.h b/vp10/encoder/treewriter.h
index 6b76a03..05de002 100644
--- a/vp10/encoder/treewriter.h
+++ b/vp10/encoder/treewriter.h
@@ -18,19 +18,19 @@
#endif
void vp10_tree_probs_from_distribution(vpx_tree tree,
- unsigned int branch_ct[ /* n - 1 */ ][2],
- const unsigned int num_events[ /* n */ ]);
+ unsigned int branch_ct[/* n - 1 */][2],
+ const unsigned int num_events[/* n */]);
struct vp10_token {
int value;
int len;
};
-void vp10_tokens_from_tree(struct vp10_token*, const vpx_tree_index *);
+void vp10_tokens_from_tree(struct vp10_token *, const vpx_tree_index *);
static INLINE void vp10_write_tree(vpx_writer *w, const vpx_tree_index *tree,
- const vpx_prob *probs, int bits, int len,
- vpx_tree_index i) {
+ const vpx_prob *probs, int bits, int len,
+ vpx_tree_index i) {
do {
const int bit = (bits >> --len) & 1;
vpx_write(w, bit, probs[i >> 1]);
@@ -39,8 +39,8 @@
}
static INLINE void vp10_write_token(vpx_writer *w, const vpx_tree_index *tree,
- const vpx_prob *probs,
- const struct vp10_token *token) {
+ const vpx_prob *probs,
+ const struct vp10_token *token) {
vp10_write_tree(w, tree, probs, token->value, token->len, 0);
}
diff --git a/vp10/encoder/x86/dct_sse2.c b/vp10/encoder/x86/dct_sse2.c
index e111157..7a61ada 100644
--- a/vp10/encoder/x86/dct_sse2.c
+++ b/vp10/encoder/x86/dct_sse2.c
@@ -78,8 +78,8 @@
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
__m128i u[4], v[4];
- u[0]=_mm_unpacklo_epi16(in[0], in[1]);
- u[1]=_mm_unpacklo_epi16(in[3], in[2]);
+ u[0] = _mm_unpacklo_epi16(in[0], in[1]);
+ u[1] = _mm_unpacklo_epi16(in[3], in[2]);
v[0] = _mm_add_epi16(u[0], u[1]);
v[1] = _mm_sub_epi16(u[0], u[1]);
@@ -151,14 +151,12 @@
transpose_4x4(in);
}
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[4];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct4x4_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_4x4(input, in, stride);
fadst4_sse2(in);
@@ -177,21 +175,16 @@
fadst4_sse2(in);
write_buffer_4x4(output, in);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
-void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_sse2(
+ const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
+ const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
+ int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
+ uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) {
__m128i zero;
int pass;
// Constants
@@ -208,14 +201,14 @@
const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
// Load input
- __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
- __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
- __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
- __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
- __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
- __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
- __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
- __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
__m128i *in[8];
int index = 0;
@@ -469,9 +462,9 @@
// Setup global values
{
- round = _mm_load_si128((const __m128i*)round_ptr);
- quant = _mm_load_si128((const __m128i*)quant_ptr);
- dequant = _mm_load_si128((const __m128i*)dequant_ptr);
+ round = _mm_load_si128((const __m128i *)round_ptr);
+ quant = _mm_load_si128((const __m128i *)quant_ptr);
+ dequant = _mm_load_si128((const __m128i *)dequant_ptr);
}
{
@@ -503,15 +496,15 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
dequant = _mm_unpackhi_epi64(dequant, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -524,8 +517,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -568,14 +561,14 @@
qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1);
coeff0 = _mm_mullo_epi16(qcoeff0, dequant);
coeff1 = _mm_mullo_epi16(qcoeff1, dequant);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1);
}
{
@@ -588,8 +581,8 @@
zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
- iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs));
- iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1);
+ iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+ iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
// Add one to convert from indices to counts
iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);
iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);
@@ -615,10 +608,10 @@
}
} else {
do {
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero);
- _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero);
+ _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero);
n_coeffs += 8 * 2;
} while (n_coeffs < 0);
*eob_ptr = 0;
@@ -628,14 +621,14 @@
// load 8x8 array
static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
int stride) {
- in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
- in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
- in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
- in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
- in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
- in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
- in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
- in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
in[0] = _mm_slli_epi16(in[0], 2);
in[1] = _mm_slli_epi16(in[1], 2);
@@ -930,14 +923,14 @@
__m128i in0, in1, in2, in3, in4, in5, in6, in7;
// properly aligned for butterfly input
- in0 = in[7];
- in1 = in[0];
- in2 = in[5];
- in3 = in[2];
- in4 = in[3];
- in5 = in[4];
- in6 = in[1];
- in7 = in[6];
+ in0 = in[7];
+ in1 = in[0];
+ in2 = in[5];
+ in3 = in[2];
+ in4 = in[3];
+ in5 = in[4];
+ in6 = in[1];
+ in7 = in[6];
// column transformation
// stage 1
@@ -1135,14 +1128,12 @@
array_transpose_8x8(in, in);
}
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in[8];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct8x8_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_8x8(input, in, stride);
fadst8_sse2(in);
@@ -1164,13 +1155,11 @@
right_shift_8x8(in, 1);
write_buffer_8x8(output, in, 8);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
-static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0,
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *in0,
__m128i *in1, int stride) {
// load first 8 columns
load_buffer_8x8(input, in0, stride);
@@ -1530,13 +1519,13 @@
v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS);
v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS);
- in[1] = _mm_packs_epi32(v[0], v[1]);
- in[9] = _mm_packs_epi32(v[2], v[3]);
- in[5] = _mm_packs_epi32(v[4], v[5]);
+ in[1] = _mm_packs_epi32(v[0], v[1]);
+ in[9] = _mm_packs_epi32(v[2], v[3]);
+ in[5] = _mm_packs_epi32(v[4], v[5]);
in[13] = _mm_packs_epi32(v[6], v[7]);
- in[3] = _mm_packs_epi32(v[8], v[9]);
+ in[3] = _mm_packs_epi32(v[8], v[9]);
in[11] = _mm_packs_epi32(v[10], v[11]);
- in[7] = _mm_packs_epi32(v[12], v[13]);
+ in[7] = _mm_packs_epi32(v[12], v[13]);
in[15] = _mm_packs_epi32(v[14], v[15]);
}
@@ -2022,14 +2011,12 @@
array_transpose_16x16(in0, in1);
}
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output,
- int stride, int tx_type) {
+void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+ int tx_type) {
__m128i in0[16], in1[16];
switch (tx_type) {
- case DCT_DCT:
- vpx_fdct16x16_sse2(input, output, stride);
- break;
+ case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break;
case ADST_DCT:
load_buffer_16x16(input, in0, in1, stride);
fadst16_sse2(in0, in1);
@@ -2051,8 +2038,6 @@
fadst16_sse2(in0, in1);
write_buffer_16x16(output, in0, in1, 16);
break;
- default:
- assert(0);
- break;
+ default: assert(0); break;
}
}
diff --git a/vp10/encoder/x86/dct_ssse3.c b/vp10/encoder/x86/dct_ssse3.c
index df298d8..74794b1 100644
--- a/vp10/encoder/x86/dct_ssse3.c
+++ b/vp10/encoder/x86/dct_ssse3.c
@@ -20,16 +20,12 @@
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
-void vp10_fdct8x8_quant_ssse3(const int16_t *input, int stride,
- int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr,
- int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+void vp10_fdct8x8_quant_ssse3(
+ const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
+ const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
+ int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+ uint16_t* eob_ptr, const int16_t* scan_ptr, const int16_t* iscan_ptr) {
__m128i zero;
int pass;
// Constants
@@ -47,15 +43,15 @@
const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
// Load input
- __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
- __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
- __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
- __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
- __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));
- __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));
- __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));
- __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));
- __m128i *in[8];
+ __m128i in0 = _mm_load_si128((const __m128i*)(input + 0 * stride));
+ __m128i in1 = _mm_load_si128((const __m128i*)(input + 1 * stride));
+ __m128i in2 = _mm_load_si128((const __m128i*)(input + 2 * stride));
+ __m128i in3 = _mm_load_si128((const __m128i*)(input + 3 * stride));
+ __m128i in4 = _mm_load_si128((const __m128i*)(input + 4 * stride));
+ __m128i in5 = _mm_load_si128((const __m128i*)(input + 5 * stride));
+ __m128i in6 = _mm_load_si128((const __m128i*)(input + 6 * stride));
+ __m128i in7 = _mm_load_si128((const __m128i*)(input + 7 * stride));
+ __m128i* in[8];
int index = 0;
(void)scan_ptr;
@@ -393,7 +389,7 @@
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
- _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+ _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
if (nzflag) {
qcoeff0 = _mm_adds_epi16(qcoeff0, round);
diff --git a/vp10/encoder/x86/error_intrin_avx2.c b/vp10/encoder/x86/error_intrin_avx2.c
index 9766be2..560cd4a 100644
--- a/vp10/encoder/x86/error_intrin_avx2.c
+++ b/vp10/encoder/x86/error_intrin_avx2.c
@@ -13,10 +13,8 @@
#include "./vp10_rtcd.h"
#include "vpx/vpx_integer.h"
-int64_t vp10_block_error_avx2(const int16_t *coeff,
- const int16_t *dqcoeff,
- intptr_t block_size,
- int64_t *ssz) {
+int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz) {
__m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
__m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
__m256i sse_reg_64hi, ssz_reg_64hi;
@@ -29,7 +27,7 @@
sse_reg = _mm256_set1_epi16(0);
ssz_reg = _mm256_set1_epi16(0);
- for (i = 0 ; i < block_size ; i+= 16) {
+ for (i = 0; i < block_size; i += 16) {
// load 32 bytes from coeff and dqcoeff
coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
@@ -66,8 +64,8 @@
_mm256_extractf128_si256(ssz_reg, 1));
// store the results
- _mm_storel_epi64((__m128i*)(&sse), sse_reg128);
- _mm_storel_epi64((__m128i*)(ssz), ssz_reg128);
+ _mm_storel_epi64((__m128i *)(&sse), sse_reg128);
+ _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
return sse;
}
diff --git a/vp10/encoder/x86/highbd_block_error_intrin_sse2.c b/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
index 6b4cf50..eefca1c 100644
--- a/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/vp10/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -14,8 +14,8 @@
#include "vp10/common/common.h"
int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
- intptr_t block_size, int64_t *ssz,
- int bps) {
+ intptr_t block_size, int64_t *ssz,
+ int bps) {
int i, j, test;
uint32_t temp[4];
__m128i max, min, cmp0, cmp1, cmp2, cmp3;
@@ -23,41 +23,41 @@
const int shift = 2 * (bps - 8);
const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
- for (i = 0; i < block_size; i+=8) {
+ for (i = 0; i < block_size; i += 8) {
// Load the data into xmm registers
- __m128i mm_coeff = _mm_load_si128((__m128i*) (coeff + i));
- __m128i mm_coeff2 = _mm_load_si128((__m128i*) (coeff + i + 4));
- __m128i mm_dqcoeff = _mm_load_si128((__m128i*) (dqcoeff + i));
- __m128i mm_dqcoeff2 = _mm_load_si128((__m128i*) (dqcoeff + i + 4));
+ __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i));
+ __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4));
+ __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i));
+ __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4));
// Check if any values require more than 15 bit
max = _mm_set1_epi32(0x3fff);
min = _mm_set1_epi32(0xffffc000);
cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),
- _mm_cmplt_epi32(mm_coeff, min));
+ _mm_cmplt_epi32(mm_coeff, min));
cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),
- _mm_cmplt_epi32(mm_coeff2, min));
+ _mm_cmplt_epi32(mm_coeff2, min));
cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),
- _mm_cmplt_epi32(mm_dqcoeff, min));
+ _mm_cmplt_epi32(mm_dqcoeff, min));
cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),
- _mm_cmplt_epi32(mm_dqcoeff2, min));
- test = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(cmp0, cmp1),
- _mm_or_si128(cmp2, cmp3)));
+ _mm_cmplt_epi32(mm_dqcoeff2, min));
+ test = _mm_movemask_epi8(
+ _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)));
if (!test) {
- __m128i mm_diff, error_sse2, sqcoeff_sse2;;
+ __m128i mm_diff, error_sse2, sqcoeff_sse2;
mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2);
mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2);
mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff);
error_sse2 = _mm_madd_epi16(mm_diff, mm_diff);
sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff);
- _mm_storeu_si128((__m128i*)temp, error_sse2);
+ _mm_storeu_si128((__m128i *)temp, error_sse2);
error = error + temp[0] + temp[1] + temp[2] + temp[3];
- _mm_storeu_si128((__m128i*)temp, sqcoeff_sse2);
+ _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2);
sqcoeff += temp[0] + temp[1] + temp[2] + temp[3];
} else {
for (j = 0; j < 8; j++) {
const int64_t diff = coeff[i + j] - dqcoeff[i + j];
- error += diff * diff;
+ error += diff * diff;
sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j];
}
}
diff --git a/vp10/encoder/x86/quantize_sse2.c b/vp10/encoder/x86/quantize_sse2.c
index dabd3bd..78d5952 100644
--- a/vp10/encoder/x86/quantize_sse2.c
+++ b/vp10/encoder/x86/quantize_sse2.c
@@ -15,13 +15,12 @@
#include "vpx/vpx_integer.h"
void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
- int skip_block, const int16_t* zbin_ptr,
- const int16_t* round_ptr, const int16_t* quant_ptr,
- const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
- int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- uint16_t* eob_ptr,
- const int16_t* scan_ptr,
- const int16_t* iscan_ptr) {
+ int skip_block, const int16_t* zbin_ptr,
+ const int16_t* round_ptr, const int16_t* quant_ptr,
+ const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
+ int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
+ uint16_t* eob_ptr, const int16_t* scan_ptr,
+ const int16_t* iscan_ptr) {
__m128i zero;
__m128i thr;
int16_t nzflag;
@@ -133,7 +132,7 @@
qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) |
- _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
+ _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr));
if (nzflag) {
qcoeff0 = _mm_adds_epi16(qcoeff0, round);