Port renaming changes from AOMedia
Cherry-picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*
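
All of the above are mechanical token renames. As a rough illustration only, the kind of rewrite they perform could be sketched as below (a hypothetical Python helper, not part of this change; the token table is taken from the commit subjects above, and the real renames were applied commit-by-commit, with the vp10_*.mk / vp10_* file renames done separately):

    #!/usr/bin/env python3
    """Hypothetical sketch of the mechanical renames listed above.

    Illustrative only: the actual change was made upstream one commit
    at a time. The token pairs are copied from the commit subjects.
    """
    import os

    # Ordered token pairs; "vp10" goes first so "vp10_foo" becomes
    # "av1_foo" before any shorter pattern could touch it.
    RENAMES = [
        ("WebM", "AOMedia"), ("webm", "aomedia"),
        ("vp10", "av1"),
        ("VPx", "AVx"), ("Vpx", "Avx"),
        ("Vp9", "Av1"), ("vp9", "av1"), ("VP9", "AV1"),
        ("Vp8", "AOM"), ("vp8", "aom"), ("VP8", "AOM"),
        ("VPX", "AOM"), ("vpx", "aom"),
    ]

    SOURCE_SUFFIXES = (".c", ".h", ".cc", ".mk", ".sh")

    def rename_tokens(text):
        # Apply every pair in order; plain substring replacement is
        # what a sed-style pass over the tree would do.
        for old, new in RENAMES:
            text = text.replace(old, new)
        return text

    def rewrite_tree(root="."):
        for dirpath, _, filenames in os.walk(root):
            if ".git" in dirpath:
                continue
            for name in filenames:
                if not name.endswith(SOURCE_SUFFIXES):
                    continue
                path = os.path.join(dirpath, name)
                with open(path, encoding="utf-8", errors="ignore") as f:
                    src = f.read()
                out = rename_tokens(src)
                if out != src:
                    with open(path, "w", encoding="utf-8") as f:
                        f.write(out)

    if __name__ == "__main__":
        rewrite_tree()
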
Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 30699b4..305a672 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -12,10 +12,10 @@
#include <limits.h>
#include <stdio.h>
-#include "aom/vpx_encoder.h"
+#include "aom/aom_encoder.h"
#include "aom_dsp/bitwriter_buffer.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem_ops.h"
#include "aom_ports/system_state.h"
#include "aom_util/debug_util.h"
@@ -46,28 +46,28 @@
#include "av1/encoder/subexp.h"
#include "av1/encoder/tokenize.h"
-static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+static const struct av1_token intra_mode_encodings[INTRA_MODES] = {
{ 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 },
{ 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
};
#if CONFIG_EXT_INTERP
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
{ { 0, 1 }, { 4, 3 }, { 6, 3 }, { 5, 3 }, { 7, 3 } };
#else
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
{ { 0, 1 }, { 2, 2 }, { 3, 2 } };
#endif // CONFIG_EXT_INTERP
#if CONFIG_EXT_PARTITION_TYPES
-static const struct vp10_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
+static const struct av1_token ext_partition_encodings[EXT_PARTITION_TYPES] = {
{ 0, 1 }, { 4, 3 }, { 12, 4 }, { 7, 3 },
{ 10, 4 }, { 11, 4 }, { 26, 5 }, { 27, 5 }
};
#endif
-static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+static const struct av1_token partition_encodings[PARTITION_TYPES] = {
{ 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
};
#if !CONFIG_REF_MV
-static const struct vp10_token inter_mode_encodings[INTER_MODES] =
+static const struct av1_token inter_mode_encodings[INTER_MODES] =
#if CONFIG_EXT_INTER
{ { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 } };
#else
@@ -75,16 +75,16 @@
#endif // CONFIG_EXT_INTER
#endif
#if CONFIG_EXT_INTER
-static const struct vp10_token
+static const struct av1_token
inter_compound_mode_encodings[INTER_COMPOUND_MODES] = {
{ 2, 2 }, { 50, 6 }, { 51, 6 }, { 24, 5 }, { 52, 6 },
{ 53, 6 }, { 54, 6 }, { 55, 6 }, { 0, 1 }, { 7, 3 }
};
#endif // CONFIG_EXT_INTER
-static const struct vp10_token palette_size_encodings[] = {
+static const struct av1_token palette_size_encodings[] = {
{ 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 30, 5 }, { 62, 6 }, { 63, 6 },
};
-static const struct vp10_token
+static const struct av1_token
palette_color_encodings[PALETTE_MAX_SIZE - 1][PALETTE_MAX_SIZE] = {
{ { 0, 1 }, { 1, 1 } }, // 2 colors
{ { 0, 1 }, { 2, 2 }, { 3, 2 } }, // 3 colors
@@ -113,84 +113,84 @@
{ 127, 7 } }, // 8 colors
};
-static const struct vp10_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
+static const struct av1_token tx_size_encodings[TX_SIZES - 1][TX_SIZES] = {
{ { 0, 1 }, { 1, 1 } }, // Max tx_size is 8X8
{ { 0, 1 }, { 2, 2 }, { 3, 2 } }, // Max tx_size is 16X16
{ { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } }, // Max tx_size is 32X32
};
-static INLINE void write_uniform(vp10_writer *w, int n, int v) {
+static INLINE void write_uniform(aom_writer *w, int n, int v) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
if (l == 0) return;
if (v < m) {
- vp10_write_literal(w, v, l - 1);
+ aom_write_literal(w, v, l - 1);
} else {
- vp10_write_literal(w, m + ((v - m) >> 1), l - 1);
- vp10_write_literal(w, (v - m) & 1, 1);
+ aom_write_literal(w, m + ((v - m) >> 1), l - 1);
+ aom_write_literal(w, (v - m) & 1, 1);
}
}
#if CONFIG_EXT_TX
-static struct vp10_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
-static struct vp10_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
+static struct av1_token ext_tx_inter_encodings[EXT_TX_SETS_INTER][TX_TYPES];
+static struct av1_token ext_tx_intra_encodings[EXT_TX_SETS_INTRA][TX_TYPES];
#else
-static struct vp10_token ext_tx_encodings[TX_TYPES];
+static struct av1_token ext_tx_encodings[TX_TYPES];
#endif // CONFIG_EXT_TX
#if CONFIG_GLOBAL_MOTION
-static struct vp10_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
+static struct av1_token global_motion_types_encodings[GLOBAL_MOTION_TYPES];
#endif // CONFIG_GLOBAL_MOTION
#if CONFIG_EXT_INTRA
-static struct vp10_token intra_filter_encodings[INTRA_FILTERS];
+static struct av1_token intra_filter_encodings[INTRA_FILTERS];
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
-static struct vp10_token interintra_mode_encodings[INTERINTRA_MODES];
+static struct av1_token interintra_mode_encodings[INTERINTRA_MODES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static struct vp10_token motvar_encodings[MOTION_VARIATIONS];
+static struct av1_token motvar_encodings[MOTION_VARIATIONS];
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
-void vp10_encode_token_init(void) {
+void av1_encode_token_init(void) {
#if CONFIG_EXT_TX
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
- vp10_tokens_from_tree(ext_tx_inter_encodings[s], vp10_ext_tx_inter_tree[s]);
+ av1_tokens_from_tree(ext_tx_inter_encodings[s], av1_ext_tx_inter_tree[s]);
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
- vp10_tokens_from_tree(ext_tx_intra_encodings[s], vp10_ext_tx_intra_tree[s]);
+ av1_tokens_from_tree(ext_tx_intra_encodings[s], av1_ext_tx_intra_tree[s]);
}
#else
- vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+ av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
#endif // CONFIG_EXT_TX
#if CONFIG_EXT_INTRA
- vp10_tokens_from_tree(intra_filter_encodings, vp10_intra_filter_tree);
+ av1_tokens_from_tree(intra_filter_encodings, av1_intra_filter_tree);
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
- vp10_tokens_from_tree(interintra_mode_encodings, vp10_interintra_mode_tree);
+ av1_tokens_from_tree(interintra_mode_encodings, av1_interintra_mode_tree);
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
- vp10_tokens_from_tree(motvar_encodings, vp10_motvar_tree);
+ av1_tokens_from_tree(motvar_encodings, av1_motvar_tree);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
#if CONFIG_GLOBAL_MOTION
- vp10_tokens_from_tree(global_motion_types_encodings,
- vp10_global_motion_types_tree);
+ av1_tokens_from_tree(global_motion_types_encodings,
+ av1_global_motion_types_tree);
#endif // CONFIG_GLOBAL_MOTION
}
-static void write_intra_mode(vp10_writer *w, PREDICTION_MODE mode,
- const vpx_prob *probs) {
- vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
+ const aom_prob *probs) {
+ av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}
#if CONFIG_EXT_INTER
-static void write_interintra_mode(vp10_writer *w, INTERINTRA_MODE mode,
- const vpx_prob *probs) {
- vp10_write_token(w, vp10_interintra_mode_tree, probs,
- &interintra_mode_encodings[mode]);
+static void write_interintra_mode(aom_writer *w, INTERINTRA_MODE mode,
+ const aom_prob *probs) {
+ av1_write_token(w, av1_interintra_mode_tree, probs,
+ &interintra_mode_encodings[mode]);
}
#endif // CONFIG_EXT_INTER
-static void write_inter_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_mode(AV1_COMMON *cm, aom_writer *w,
PREDICTION_MODE mode,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
int is_compound,
@@ -198,53 +198,53 @@
const int16_t mode_ctx) {
#if CONFIG_REF_MV
const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
- const vpx_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
+ const aom_prob newmv_prob = cm->fc->newmv_prob[newmv_ctx];
#if CONFIG_EXT_INTER
- vp10_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
+ aom_write(w, mode != NEWMV && mode != NEWFROMNEARMV, newmv_prob);
if (!is_compound && (mode == NEWMV || mode == NEWFROMNEARMV))
- vp10_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
+ aom_write(w, mode == NEWFROMNEARMV, cm->fc->new2mv_prob);
if (mode != NEWMV && mode != NEWFROMNEARMV) {
#else
- vp10_write(w, mode != NEWMV, newmv_prob);
+ aom_write(w, mode != NEWMV, newmv_prob);
if (mode != NEWMV) {
#endif // CONFIG_EXT_INTER
const int16_t zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
- const vpx_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
+ const aom_prob zeromv_prob = cm->fc->zeromv_prob[zeromv_ctx];
if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) {
assert(mode == ZEROMV);
return;
}
- vp10_write(w, mode != ZEROMV, zeromv_prob);
+ aom_write(w, mode != ZEROMV, zeromv_prob);
if (mode != ZEROMV) {
int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
- vpx_prob refmv_prob;
+ aom_prob refmv_prob;
if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
refmv_prob = cm->fc->refmv_prob[refmv_ctx];
- vp10_write(w, mode != NEARESTMV, refmv_prob);
+ aom_write(w, mode != NEARESTMV, refmv_prob);
}
}
#else
- const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
+ const aom_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
assert(is_inter_mode(mode));
- vp10_write_token(w, vp10_inter_mode_tree, inter_probs,
- &inter_mode_encodings[INTER_OFFSET(mode)]);
+ av1_write_token(w, av1_inter_mode_tree, inter_probs,
+ &inter_mode_encodings[INTER_OFFSET(mode)]);
#endif
}
#if CONFIG_REF_MV
-static void write_drl_idx(const VP10_COMMON *cm, const MB_MODE_INFO *mbmi,
- const MB_MODE_INFO_EXT *mbmi_ext, vp10_writer *w) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void write_drl_idx(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
+ const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
assert(mbmi->ref_mv_idx < 3);
@@ -253,10 +253,10 @@
for (idx = 0; idx < 2; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- vp10_write(w, mbmi->ref_mv_idx != idx, drl_prob);
+ aom_write(w, mbmi->ref_mv_idx != idx, drl_prob);
if (mbmi->ref_mv_idx == idx) return;
}
}
@@ -269,10 +269,10 @@
for (idx = 1; idx < 3; ++idx) {
if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx =
- vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- vp10_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
+ aom_write(w, mbmi->ref_mv_idx != (idx - 1), drl_prob);
if (mbmi->ref_mv_idx == (idx - 1)) return;
}
}
@@ -282,40 +282,40 @@
#endif
#if CONFIG_EXT_INTER
-static void write_inter_compound_mode(VP10_COMMON *cm, vp10_writer *w,
+static void write_inter_compound_mode(AV1_COMMON *cm, aom_writer *w,
PREDICTION_MODE mode,
const int16_t mode_ctx) {
- const vpx_prob *const inter_compound_probs =
+ const aom_prob *const inter_compound_probs =
cm->fc->inter_compound_mode_probs[mode_ctx];
assert(is_inter_compound_mode(mode));
- vp10_write_token(w, vp10_inter_compound_mode_tree, inter_compound_probs,
- &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
+ av1_write_token(w, av1_inter_compound_mode_tree, inter_compound_probs,
+ &inter_compound_mode_encodings[INTER_COMPOUND_OFFSET(mode)]);
}
#endif // CONFIG_EXT_INTER
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data,
+static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
int max) {
- vpx_wb_write_literal(wb, data, get_unsigned_bits(max));
+ aom_wb_write_literal(wb, data, get_unsigned_bits(max));
}
-static void prob_diff_update(const vpx_tree_index *tree,
- vpx_prob probs[/*n - 1*/],
+static void prob_diff_update(const aom_tree_index *tree,
+ aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
- vp10_writer *w) {
+ aom_writer *w) {
int i;
unsigned int branch_ct[32][2];
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i)
- vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+ av1_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
-static int prob_diff_update_savings(const vpx_tree_index *tree,
- vpx_prob probs[/*n - 1*/],
+static int prob_diff_update_savings(const aom_tree_index *tree,
+ aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/],
int n) {
int i;
@@ -324,17 +324,17 @@
// Assuming max number of probabilities <= 32
assert(n <= 32);
- vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+ av1_tree_probs_from_distribution(tree, branch_ct, counts);
for (i = 0; i < n - 1; ++i) {
- savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+ savings += av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
}
return savings;
}
#if CONFIG_VAR_TX
-static void write_tx_size_vartx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_tx_size_vartx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
- int blk_row, int blk_col, vp10_writer *w) {
+ int blk_row, int blk_col, aom_writer *w) {
const int tx_row = blk_row >> 1;
const int tx_col = blk_col >> 1;
int max_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];
@@ -348,14 +348,14 @@
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
- vp10_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
+ aom_write(w, 0, cm->fc->txfm_partition_prob[ctx]);
txfm_partition_update(xd->above_txfm_context + tx_col,
xd->left_txfm_context + tx_row, tx_size);
} else {
const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
int bsl = b_width_log2_lookup[bsize];
int i;
- vp10_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
+ aom_write(w, 1, cm->fc->txfm_partition_prob[ctx]);
if (tx_size == TX_8X8) {
txfm_partition_update(xd->above_txfm_context + tx_col,
@@ -373,17 +373,17 @@
}
}
-static void update_txfm_partition_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_partition_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int k;
for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
- vp10_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
- counts->txfm_partition[k]);
+ av1_cond_prob_diff_update(w, &cm->fc->txfm_partition_prob[k],
+ counts->txfm_partition[k]);
}
#endif
-static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
// For sub8x8 blocks the tx_size symbol does not need to be sent
@@ -401,89 +401,87 @@
IMPLIES(is_rect_tx(tx_size), tx_size == max_txsize_rect_lookup[bsize]));
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
- vp10_write_token(w, vp10_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
- &tx_size_encodings[tx_size_cat][coded_tx_size]);
+ av1_write_token(w, av1_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][tx_size_ctx],
+ &tx_size_encodings[tx_size_cat][coded_tx_size]);
}
}
#if CONFIG_REF_MV
-static void update_inter_mode_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_inter_mode_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int i;
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->newmv_prob[i],
- counts->newmv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->newmv_prob[i], counts->newmv_mode[i]);
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
- counts->zeromv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->zeromv_prob[i],
+ counts->zeromv_mode[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->refmv_prob[i],
- counts->refmv_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->refmv_prob[i], counts->refmv_mode[i]);
for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
- vp10_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->drl_prob[i], counts->drl_mode[i]);
#if CONFIG_EXT_INTER
- vp10_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
+ av1_cond_prob_diff_update(w, &cm->fc->new2mv_prob, counts->new2mv_mode);
#endif // CONFIG_EXT_INTER
}
#endif
#if CONFIG_EXT_INTER
-static void update_inter_compound_mode_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_inter_compound_mode_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i;
int savings = 0;
int do_update = 0;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
savings += prob_diff_update_savings(
- vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+ av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
prob_diff_update(
- vp10_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
+ av1_inter_compound_mode_tree, cm->fc->inter_compound_mode_probs[i],
cm->counts.inter_compound_mode[i], INTER_COMPOUND_MODES, w);
}
}
}
#endif // CONFIG_EXT_INTER
-static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- int segment_id, const MODE_INFO *mi, vp10_writer *w) {
+static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ int segment_id, const MODE_INFO *mi, aom_writer *w) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
const int skip = mi->mbmi.skip;
- vp10_write(w, skip, vp10_get_skip_prob(cm, xd));
+ aom_write(w, skip, av1_get_skip_prob(cm, xd));
return skip;
}
}
-static void update_skip_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int k;
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+ av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
}
-static void update_switchable_interp_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
- prob_diff_update(vp10_switchable_interp_tree,
+ prob_diff_update(av1_switchable_interp_tree,
cm->fc->switchable_interp_prob[j],
counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
}
#if CONFIG_EXT_TX
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
@@ -492,16 +490,16 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
savings += prob_diff_update_savings(
- vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+ av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s]);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
prob_diff_update(
- vp10_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
+ av1_ext_tx_inter_tree[s], cm->fc->inter_ext_tx_prob[s][i],
cm->counts.inter_ext_tx[s][i], num_ext_tx_set_inter[s], w);
}
}
@@ -514,17 +512,17 @@
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
savings += prob_diff_update_savings(
- vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+ av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s]);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
prob_diff_update(
- vp10_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
+ av1_ext_tx_intra_tree[s], cm->fc->intra_ext_tx_prob[s][i][j],
cm->counts.intra_ext_tx[s][i][j], num_ext_tx_set_intra[s], w);
}
}
@@ -533,9 +531,9 @@
#else
-static void update_ext_tx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int savings = 0;
@@ -543,43 +541,43 @@
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
savings += prob_diff_update_savings(
- vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
- prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
}
}
savings = 0;
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
savings +=
- prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES);
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
- prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+ prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
cm->counts.inter_ext_tx[i], TX_TYPES, w);
}
}
}
#endif // CONFIG_EXT_TX
-static void pack_palette_tokens(vp10_writer *w, const TOKENEXTRA **tp, int n,
+static void pack_palette_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
int num) {
int i;
const TOKENEXTRA *p = *tp;
for (i = 0; i < num; ++i) {
- vp10_write_token(w, vp10_palette_color_tree[n - 2], p->context_tree,
- &palette_color_encodings[n - 2][p->token]);
+ av1_write_token(w, av1_palette_color_tree[n - 2], p->context_tree,
+ &palette_color_encodings[n - 2][p->token]);
++p;
}
@@ -587,25 +585,25 @@
}
#if CONFIG_SUPERTX
-static void update_supertx_probs(VP10_COMMON *cm, vp10_writer *w) {
- const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
- vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_supertx_probs(AV1_COMMON *cm, aom_writer *w) {
+ const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+ av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
int i, j;
int savings = 0;
int do_update = 0;
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- savings += vp10_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
- cm->counts.supertx[i][j]);
+ savings += av1_cond_prob_diff_update_savings(&cm->fc->supertx_prob[i][j],
+ cm->counts.supertx[i][j]);
}
}
do_update = savings > savings_thresh;
- vp10_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
+ aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
if (do_update) {
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- vp10_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
- cm->counts.supertx[i][j]);
+ av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j],
+ cm->counts.supertx[i][j]);
}
}
}
@@ -613,9 +611,9 @@
#endif // CONFIG_SUPERTX
#if !CONFIG_ANS
-static void pack_mb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
const TOKENEXTRA *const stop,
- vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+ aom_bit_depth_t bit_depth, const TX_SIZE tx) {
const TOKENEXTRA *p = *tp;
#if CONFIG_VAR_TX
int count = 0;
@@ -624,39 +622,39 @@
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
- const struct vp10_token *const a = &vp10_coef_encodings[t];
+ const struct av1_token *const a = &av1_coef_encodings[t];
int v = a->value;
int n = a->len;
-#if CONFIG_VP9_HIGHBITDEPTH
- const vp10_extra_bit *b;
- if (bit_depth == VPX_BITS_12)
- b = &vp10_extra_bits_high12[t];
- else if (bit_depth == VPX_BITS_10)
- b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+ const av1_extra_bit *b;
+ if (bit_depth == AOM_BITS_12)
+ b = &av1_extra_bits_high12[t];
+ else if (bit_depth == AOM_BITS_10)
+ b = &av1_extra_bits_high10[t];
else
- b = &vp10_extra_bits[t];
+ b = &av1_extra_bits[t];
#else
- const vp10_extra_bit *const b = &vp10_extra_bits[t];
+ const av1_extra_bit *const b = &av1_extra_bits[t];
(void)bit_depth;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* skip one or two nodes */
if (p->skip_eob_node)
n -= p->skip_eob_node;
else
- vp10_write(w, t != EOB_TOKEN, p->context_tree[0]);
+ aom_write(w, t != EOB_TOKEN, p->context_tree[0]);
if (t != EOB_TOKEN) {
- vp10_write(w, t != ZERO_TOKEN, p->context_tree[1]);
+ aom_write(w, t != ZERO_TOKEN, p->context_tree[1]);
if (t != ZERO_TOKEN) {
- vp10_write(w, t != ONE_TOKEN, p->context_tree[2]);
+ aom_write(w, t != ONE_TOKEN, p->context_tree[2]);
if (t != ONE_TOKEN) {
int len = UNCONSTRAINED_NODES - p->skip_eob_node;
- vp10_write_tree(w, vp10_coef_con_tree,
- vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
- n - len, 0);
+ av1_write_tree(w, av1_coef_con_tree,
+ av1_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+ n - len, 0);
}
}
}
@@ -679,13 +677,13 @@
skip_bits--;
assert(!bb);
} else {
- vp10_write(w, bb, pb[i >> 1]);
+ aom_write(w, bb, pb[i >> 1]);
}
i = b->tree[i + bb];
} while (n);
}
- vp10_write_bit(w, e & 1);
+ aom_write_bit(w, e & 1);
}
++p;
@@ -702,7 +700,7 @@
// coder.
static void pack_mb_tokens(struct BufAnsCoder *ans, const TOKENEXTRA **tp,
const TOKENEXTRA *const stop,
- vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
+ aom_bit_depth_t bit_depth, const TX_SIZE tx) {
const TOKENEXTRA *p = *tp;
#if CONFIG_VAR_TX
int count = 0;
@@ -711,18 +709,18 @@
while (p < stop && p->token != EOSB_TOKEN) {
const int t = p->token;
-#if CONFIG_VP9_HIGHBITDEPTH
- const vp10_extra_bit *b;
- if (bit_depth == VPX_BITS_12)
- b = &vp10_extra_bits_high12[t];
- else if (bit_depth == VPX_BITS_10)
- b = &vp10_extra_bits_high10[t];
+#if CONFIG_AOM_HIGHBITDEPTH
+ const av1_extra_bit *b;
+ if (bit_depth == AOM_BITS_12)
+ b = &av1_extra_bits_high12[t];
+ else if (bit_depth == AOM_BITS_10)
+ b = &av1_extra_bits_high10[t];
else
- b = &vp10_extra_bits[t];
+ b = &av1_extra_bits[t];
#else
- const vp10_extra_bit *const b = &vp10_extra_bits[t];
+ const av1_extra_bit *const b = &av1_extra_bits[t];
(void)bit_depth;
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
/* skip one or two nodes */
if (!p->skip_eob_node)
@@ -776,10 +774,10 @@
#endif // !CONFIG_ANS
#if CONFIG_VAR_TX
-static void pack_txb_tokens(vp10_writer *w, const TOKENEXTRA **tp,
+static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, int plane,
- BLOCK_SIZE plane_bsize, vpx_bit_depth_t bit_depth,
+ BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
int block, int blk_row, int blk_col,
TX_SIZE tx_size) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -824,16 +822,16 @@
}
#endif
-static void write_segment_id(vp10_writer *w, const struct segmentation *seg,
+static void write_segment_id(aom_writer *w, const struct segmentation *seg,
const struct segmentation_probs *segp,
int segment_id) {
if (seg->enabled && seg->update_map)
- vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+ av1_write_tree(w, av1_segment_tree, segp->tree_probs, segment_id, 3, 0);
}
// This function encodes the reference frame
-static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int is_compound = has_second_ref(mbmi);
const int segment_id = mbmi->segment_id;
@@ -848,7 +846,7 @@
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- vp10_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+ aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
} else {
assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
}
@@ -862,47 +860,47 @@
const int bit = mbmi->ref_frame[0] == GOLDEN_FRAME;
#endif // CONFIG_EXT_REFS
- vp10_write(w, bit, vp10_get_pred_prob_comp_ref_p(cm, xd));
+ aom_write(w, bit, av1_get_pred_prob_comp_ref_p(cm, xd));
#if CONFIG_EXT_REFS
if (!bit) {
const int bit1 = mbmi->ref_frame[0] == LAST_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_comp_ref_p1(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_comp_ref_p1(cm, xd));
} else {
const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
- vp10_write(w, bit2, vp10_get_pred_prob_comp_ref_p2(cm, xd));
+ aom_write(w, bit2, av1_get_pred_prob_comp_ref_p2(cm, xd));
}
- vp10_write(w, bit_bwd, vp10_get_pred_prob_comp_bwdref_p(cm, xd));
+ aom_write(w, bit_bwd, av1_get_pred_prob_comp_bwdref_p(cm, xd));
#endif // CONFIG_EXT_REFS
} else {
#if CONFIG_EXT_REFS
const int bit0 = (mbmi->ref_frame[0] == ALTREF_FRAME ||
mbmi->ref_frame[0] == BWDREF_FRAME);
- vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+ aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
if (bit0) {
const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
} else {
const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
mbmi->ref_frame[0] == GOLDEN_FRAME);
- vp10_write(w, bit2, vp10_get_pred_prob_single_ref_p3(cm, xd));
+ aom_write(w, bit2, av1_get_pred_prob_single_ref_p3(cm, xd));
if (!bit2) {
const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
- vp10_write(w, bit3, vp10_get_pred_prob_single_ref_p4(cm, xd));
+ aom_write(w, bit3, av1_get_pred_prob_single_ref_p4(cm, xd));
} else {
const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
- vp10_write(w, bit4, vp10_get_pred_prob_single_ref_p5(cm, xd));
+ aom_write(w, bit4, av1_get_pred_prob_single_ref_p5(cm, xd));
}
}
#else // CONFIG_EXT_REFS
const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
- vp10_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+ aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
if (bit0) {
const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
- vp10_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+ aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
}
#endif // CONFIG_EXT_REFS
}
@@ -910,15 +908,15 @@
}
#if CONFIG_EXT_INTRA
-static void write_ext_intra_mode_info(const VP10_COMMON *const cm,
+static void write_ext_intra_mode_info(const AV1_COMMON *const cm,
const MB_MODE_INFO *const mbmi,
- vp10_writer *w) {
+ aom_writer *w) {
#if !ALLOW_FILTER_INTRA_MODES
return;
#endif
if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
- vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
- cm->fc->ext_intra_probs[0]);
+ aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[0],
+ cm->fc->ext_intra_probs[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[0];
write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -927,8 +925,8 @@
if (mbmi->uv_mode == DC_PRED &&
mbmi->palette_mode_info.palette_size[1] == 0) {
- vp10_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
- cm->fc->ext_intra_probs[1]);
+ aom_write(w, mbmi->ext_intra_mode_info.use_ext_intra_mode[1],
+ cm->fc->ext_intra_probs[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
EXT_INTRA_MODE mode = mbmi->ext_intra_mode_info.ext_intra_mode[1];
write_uniform(w, FILTER_INTRA_MODES, mode);
@@ -936,11 +934,11 @@
}
}
-static void write_intra_angle_info(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- vp10_writer *w) {
+static void write_intra_angle_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ aom_writer *w) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- const int intra_filter_ctx = vp10_get_pred_context_intra_interp(xd);
+ const int intra_filter_ctx = av1_get_pred_context_intra_interp(xd);
int p_angle;
if (bsize < BLOCK_8X8) return;
@@ -949,10 +947,10 @@
write_uniform(w, 2 * MAX_ANGLE_DELTAS + 1,
MAX_ANGLE_DELTAS + mbmi->angle_delta[0]);
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
- vp10_write_token(w, vp10_intra_filter_tree,
- cm->fc->intra_filter_probs[intra_filter_ctx],
- &intra_filter_encodings[mbmi->intra_filter]);
+ if (av1_is_intra_filter_switchable(p_angle)) {
+ av1_write_token(w, av1_intra_filter_tree,
+ cm->fc->intra_filter_probs[intra_filter_ctx],
+ &intra_filter_encodings[mbmi->intra_filter]);
}
}
@@ -963,10 +961,9 @@
}
#endif // CONFIG_EXT_INTRA
-static void write_switchable_interp_filter(VP10_COMP *cpi,
- const MACROBLOCKD *xd,
- vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_switchable_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
+ aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
#if CONFIG_DUAL_FILTER
int dir;
@@ -974,12 +971,12 @@
if (cm->interp_filter == SWITCHABLE) {
#if CONFIG_EXT_INTERP
#if CONFIG_DUAL_FILTER
- if (!vp10_is_interp_needed(xd)) {
+ if (!av1_is_interp_needed(xd)) {
assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
return;
}
#else
- if (!vp10_is_interp_needed(xd)) {
+ if (!av1_is_interp_needed(xd)) {
#if CONFIG_DUAL_FILTER
assert(mbmi->interp_filter[0] == EIGHTTAP_REGULAR);
assert(mbmi->interp_filter[1] == EIGHTTAP_REGULAR);
@@ -995,28 +992,27 @@
if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
- vp10_write_token(
- w, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx],
- &switchable_interp_encodings[mbmi->interp_filter[dir]]);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
+ av1_write_token(w, av1_switchable_interp_tree,
+ cm->fc->switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mbmi->interp_filter[dir]]);
++cpi->interp_filter_selected[0][mbmi->interp_filter[dir]];
}
}
#else
{
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
- vp10_write_token(w, vp10_switchable_interp_tree,
- cm->fc->switchable_interp_prob[ctx],
- &switchable_interp_encodings[mbmi->interp_filter]);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
+ av1_write_token(w, av1_switchable_interp_tree,
+ cm->fc->switchable_interp_prob[ctx],
+ &switchable_interp_encodings[mbmi->interp_filter]);
++cpi->interp_filter_selected[0][mbmi->interp_filter];
}
#endif
}
}
-static void write_palette_mode_info(const VP10_COMMON *cm,
- const MACROBLOCKD *xd,
- const MODE_INFO *const mi, vp10_writer *w) {
+static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ const MODE_INFO *const mi, aom_writer *w) {
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
@@ -1031,44 +1027,43 @@
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- vp10_write(
- w, n > 0,
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
+ aom_write(w, n > 0,
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx]);
if (n > 0) {
- vp10_write_token(w, vp10_palette_size_tree,
- vp10_default_palette_y_size_prob[bsize - BLOCK_8X8],
- &palette_size_encodings[n - 2]);
+ av1_write_token(w, av1_palette_size_tree,
+ av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
+ &palette_size_encodings[n - 2]);
for (i = 0; i < n; ++i)
- vp10_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[i], cm->bit_depth);
write_uniform(w, n, pmi->palette_first_color_idx[0]);
}
}
if (mbmi->uv_mode == DC_PRED) {
n = pmi->palette_size[1];
- vp10_write(w, n > 0,
- vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
+ aom_write(w, n > 0,
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0]);
if (n > 0) {
- vp10_write_token(w, vp10_palette_size_tree,
- vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8],
- &palette_size_encodings[n - 2]);
+ av1_write_token(w, av1_palette_size_tree,
+ av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
+ &palette_size_encodings[n - 2]);
for (i = 0; i < n; ++i) {
- vp10_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
- cm->bit_depth);
- vp10_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
- cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[PALETTE_MAX_SIZE + i],
+ cm->bit_depth);
+ aom_write_literal(w, pmi->palette_colors[2 * PALETTE_MAX_SIZE + i],
+ cm->bit_depth);
}
write_uniform(w, n, pmi->palette_first_color_idx[1]);
}
}
}
-static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
- vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+ aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
#if !CONFIG_REF_MV
const nmv_context *nmvc = &cm->fc->nmvc;
#endif
@@ -1089,8 +1084,8 @@
if (seg->update_map) {
if (seg->temporal_update) {
const int pred_flag = mbmi->seg_id_predicted;
- vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
- vp10_write(w, pred_flag, pred_prob);
+ aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
+ aom_write(w, pred_flag, pred_prob);
if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
} else {
write_segment_id(w, seg, segp, segment_id);
@@ -1110,7 +1105,7 @@
if (!supertx_enabled)
#endif // CONFIG_SUPERTX
if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
- vp10_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+ aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
#if CONFIG_SUPERTX
@@ -1172,8 +1167,8 @@
mode_ctx = mbmi_ext->compound_mode_context[mbmi->ref_frame[0]];
else
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, -1);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, -1);
#endif
// If segment skip is not enabled code the mode.
@@ -1213,8 +1208,8 @@
#if CONFIG_EXT_INTER
if (!is_compound)
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(mbmi_ext->mode_context,
- mbmi->ref_frame, bsize, j);
+ mode_ctx = av1_mode_context_analyzer(mbmi_ext->mode_context,
+ mbmi->ref_frame, bsize, j);
#endif
#if CONFIG_EXT_INTER
if (is_inter_compound_mode(b_mode))
@@ -1236,53 +1231,53 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
#if CONFIG_EXT_INTER
- &mi->bmi[j].ref_mv[ref].as_mv,
+ &mi->bmi[j].ref_mv[ref].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
#else
#if CONFIG_REF_MV
- &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
+ &mi->bmi[j].pred_mv_s8[ref].as_mv, is_compound,
#else
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
#endif // CONFIG_REF_MV
#endif // CONFIG_EXT_INTER
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
}
#if CONFIG_EXT_INTER
else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
- &mi->bmi[j].ref_mv[1].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
+ &mi->bmi[j].ref_mv[1].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
} else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
- &mi->bmi[j].ref_mv[0].as_mv,
+ av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
+ &mi->bmi[j].ref_mv[0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
#endif // CONFIG_EXT_INTER
}
@@ -1297,52 +1292,52 @@
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
- vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+ av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
#if CONFIG_EXT_INTER
if (mode == NEWFROMNEARMV)
- vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
else
#endif // CONFIG_EXT_INTER
- vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
}
#if CONFIG_EXT_INTER
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
- mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+ mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
- vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
- &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
+ av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
+ &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
#if CONFIG_REF_MV
- is_compound,
+ is_compound,
#endif
- nmvc, allow_hp);
+ nmvc, allow_hp);
#endif // CONFIG_EXT_INTER
}
}
@@ -1355,16 +1350,16 @@
is_interintra_allowed(mbmi)) {
const int interintra = mbmi->ref_frame[1] == INTRA_FRAME;
const int bsize_group = size_group_lookup[bsize];
- vp10_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
+ aom_write(w, interintra, cm->fc->interintra_prob[bsize_group]);
if (interintra) {
write_interintra_mode(w, mbmi->interintra_mode,
cm->fc->interintra_mode_prob[bsize_group]);
if (is_interintra_wedge_used(bsize)) {
- vp10_write(w, mbmi->use_wedge_interintra,
- cm->fc->wedge_interintra_prob[bsize]);
+ aom_write(w, mbmi->use_wedge_interintra,
+ cm->fc->wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
- vp10_write_literal(w, mbmi->interintra_wedge_index,
- get_wedge_bits_lookup(bsize));
+ aom_write_literal(w, mbmi->interintra_wedge_index,
+ get_wedge_bits_lookup(bsize));
assert(mbmi->interintra_wedge_sign == 0);
}
}
@@ -1384,8 +1379,8 @@
// is not active, and assume SIMPLE_TRANSLATION in the decoder if
// it is active.
assert(mbmi->motion_variation < MOTION_VARIATIONS);
- vp10_write_token(w, vp10_motvar_tree, cm->fc->motvar_prob[bsize],
- &motvar_encodings[mbmi->motion_variation]);
+ av1_write_token(w, av1_motvar_tree, cm->fc->motvar_prob[bsize],
+ &motvar_encodings[mbmi->motion_variation]);
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -1397,12 +1392,12 @@
mbmi->motion_variation != SIMPLE_TRANSLATION) &&
#endif // CONFIG_OBMC
is_interinter_wedge_used(bsize)) {
- vp10_write(w, mbmi->use_wedge_interinter,
- cm->fc->wedge_interinter_prob[bsize]);
+ aom_write(w, mbmi->use_wedge_interinter,
+ cm->fc->wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter) {
- vp10_write_literal(w, mbmi->interinter_wedge_index,
- get_wedge_bits_lookup(bsize));
- vp10_write_bit(w, mbmi->interinter_wedge_sign);
+ aom_write_literal(w, mbmi->interinter_wedge_index,
+ get_wedge_bits_lookup(bsize));
+ aom_write_bit(w, mbmi->interinter_wedge_sign);
}
}
#endif // CONFIG_EXT_INTER
@@ -1424,14 +1419,14 @@
if (is_inter) {
assert(ext_tx_used_inter[eset][mbmi->tx_type]);
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_inter_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_inter_tree[eset],
cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]],
&ext_tx_inter_encodings[eset][mbmi->tx_type]);
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_intra_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
&ext_tx_intra_encodings[eset][mbmi->tx_type]);
}
@@ -1443,12 +1438,12 @@
#endif // CONFIG_SUPERTX
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
if (is_inter) {
- vp10_write_token(w, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[mbmi->tx_size],
- &ext_tx_encodings[mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size],
+ &ext_tx_encodings[mbmi->tx_type]);
} else {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob
[mbmi->tx_size][intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
@@ -1465,8 +1460,8 @@
}
}
-static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
- MODE_INFO **mi_8x8, vp10_writer *w) {
+static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
+ MODE_INFO **mi_8x8, aom_writer *w) {
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
const MODE_INFO *const mi = mi_8x8[0];
@@ -1518,16 +1513,16 @@
ALLOW_INTRA_EXT_TX) {
int eset = get_ext_tx_set(mbmi->tx_size, bsize, 0);
if (eset > 0)
- vp10_write_token(
- w, vp10_ext_tx_intra_tree[eset],
+ av1_write_token(
+ w, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode],
&ext_tx_intra_encodings[eset][mbmi->tx_type]);
}
#else
if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
- vp10_write_token(
- w, vp10_ext_tx_tree,
+ av1_write_token(
+ w, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size]
[intra_mode_to_tx_type_context[mbmi->mode]],
&ext_tx_encodings[mbmi->tx_type]);
@@ -1546,14 +1541,14 @@
write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col)
#endif // CONFIG_ANS && CONFIG_SUPERTX
-static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
- vp10_writer *w, const TOKENEXTRA **tok,
+static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
+ aom_writer *w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
int mi_row, int mi_col) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
int plane;
@@ -1584,8 +1579,8 @@
xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK);
#endif
#if CONFIG_EXT_INTERP
- // vp10_is_interp_needed needs the ref frame buffers set up to look
- // up if they are scaled. vp10_is_interp_needed is in turn needed by
+ // av1_is_interp_needed needs the ref frame buffers set up to look
+ // up if they are scaled. av1_is_interp_needed is in turn needed by
// write_switchable_interp_filter, which is called by pack_inter_mode_mvs.
set_ref_ptrs(cm, xd, m->mbmi.ref_frame[0], m->mbmi.ref_frame[1]);
#endif // CONFIG_EXT_INTERP
@@ -1645,7 +1640,7 @@
MB_MODE_INFO *mbmi = &m->mbmi;
BLOCK_SIZE bsize = mbmi->sb_type;
const BLOCK_SIZE plane_bsize =
- get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+ get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
@@ -1692,31 +1687,31 @@
}
}
-static void write_partition(const VP10_COMMON *const cm,
+static void write_partition(const AV1_COMMON *const cm,
const MACROBLOCKD *const xd, int hbs, int mi_row,
int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
- vp10_writer *w) {
+ aom_writer *w) {
const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
- const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+ const aom_prob *const probs = cm->fc->partition_prob[ctx];
const int has_rows = (mi_row + hbs) < cm->mi_rows;
const int has_cols = (mi_col + hbs) < cm->mi_cols;
if (has_rows && has_cols) {
#if CONFIG_EXT_PARTITION_TYPES
if (bsize <= BLOCK_8X8)
- vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+ av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
else
- vp10_write_token(w, vp10_ext_partition_tree, probs,
- &ext_partition_encodings[p]);
+ av1_write_token(w, av1_ext_partition_tree, probs,
+ &ext_partition_encodings[p]);
#else
- vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+ av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
#endif // CONFIG_EXT_PARTITION_TYPES
} else if (!has_rows && has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
- vp10_write(w, p == PARTITION_SPLIT, probs[1]);
+ aom_write(w, p == PARTITION_SPLIT, probs[1]);
} else if (has_rows && !has_cols) {
assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
- vp10_write(w, p == PARTITION_SPLIT, probs[2]);
+ aom_write(w, p == PARTITION_SPLIT, probs[2]);
} else {
assert(p == PARTITION_SPLIT);
}
@@ -1733,14 +1728,14 @@
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize)
#endif // CONFIG_ANS && CONFIG_SUPERTX
-static void write_modes_sb(VP10_COMP *const cpi, const TileInfo *const tile,
- vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
+ aom_writer *const w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
int mi_row, int mi_col, BLOCK_SIZE bsize) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
@@ -1764,12 +1759,12 @@
if (!supertx_enabled && !frame_is_intra_only(cm) &&
partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE &&
!xd->lossless[0]) {
- vpx_prob prob;
+ aom_prob prob;
supertx_size = max_txsize_lookup[bsize];
prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]]
[supertx_size];
supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size);
- vp10_write(w, supertx_enabled, prob);
+ aom_write(w, supertx_enabled, prob);
}
#endif // CONFIG_SUPERTX
if (subsize < BLOCK_8X8) {
@@ -1858,16 +1853,16 @@
if (get_ext_tx_types(supertx_size, bsize, 1) > 1 && !skip) {
int eset = get_ext_tx_set(supertx_size, bsize, 1);
if (eset > 0) {
- vp10_write_token(w, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][supertx_size],
- &ext_tx_inter_encodings[eset][mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][supertx_size],
+ &ext_tx_inter_encodings[eset][mbmi->tx_type]);
}
}
#else
if (supertx_size < TX_32X32 && !skip) {
- vp10_write_token(w, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[supertx_size],
- &ext_tx_encodings[mbmi->tx_type]);
+ av1_write_token(w, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[supertx_size],
+ &ext_tx_encodings[mbmi->tx_type]);
}
#endif // CONFIG_EXT_TX
@@ -1904,7 +1899,7 @@
#if DERING_REFINEMENT
if (bsize == BLOCK_64X64 && cm->dering_level != 0 &&
!sb_all_skip(cm, mi_row, mi_col)) {
- vpx_write_literal(
+ aom_write_literal(
w,
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
DERING_REFINEMENT_BITS);
@@ -1913,10 +1908,10 @@
#endif // CONFIG_EXT_PARTITION_TYPES
}
-static void write_modes(VP10_COMP *const cpi, const TileInfo *const tile,
- vp10_writer *const w, const TOKENEXTRA **tok,
+static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,
+ aom_writer *const w, const TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end) {
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
const int mi_row_start = tile->mi_row_start;
const int mi_row_end = tile->mi_row_end;
@@ -1924,10 +1919,10 @@
const int mi_col_end = tile->mi_col_end;
int mi_row, mi_col;
- vp10_zero_above_context(cm, mi_col_start, mi_col_end);
+ av1_zero_above_context(cm, mi_col_start, mi_col_end);
for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += cm->mib_size) {
- vp10_zero_left_context(xd);
+ av1_zero_left_context(xd);
for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += cm->mib_size) {
write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, 0, mi_row, mi_col,
@@ -1936,10 +1931,10 @@
}
}
-static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
- vp10_coeff_stats *coef_branch_ct,
- vp10_coeff_probs_model *coef_probs) {
- vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
+ av1_coeff_stats *coef_branch_ct,
+ av1_coeff_probs_model *coef_probs) {
+ av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
cpi->common.counts.eob_branch[tx_size];
int i, j, k, l, m;
@@ -1948,9 +1943,9 @@
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
- vp10_tree_probs_from_distribution(vp10_coef_tree,
- coef_branch_ct[i][j][k][l],
- coef_counts[i][j][k][l]);
+ av1_tree_probs_from_distribution(av1_coef_tree,
+ coef_branch_ct[i][j][k][l],
+ coef_counts[i][j][k][l]);
coef_branch_ct[i][j][k][l][0][1] =
eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0];
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
@@ -1963,12 +1958,12 @@
}
}
-static void update_coef_probs_common(vp10_writer *const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
TX_SIZE tx_size,
- vp10_coeff_stats *frame_branch_ct,
- vp10_coeff_probs_model *new_coef_probs) {
- vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ av1_coeff_stats *frame_branch_ct,
+ av1_coeff_probs_model *new_coef_probs) {
+ av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+ const aom_prob upd = DIFF_UPDATE_PROB;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -1983,22 +1978,22 @@
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ const aom_prob oldp = old_coef_probs[i][j][k][l][t];
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
if (s > 0 && newp != oldp) u = 1;
if (u)
- savings += s - (int)(vp10_cost_zero(upd));
+ savings += s - (int)(av1_cost_zero(upd));
else
- savings -= (int)(vp10_cost_zero(upd));
+ savings -= (int)(av1_cost_zero(upd));
update[u]++;
}
}
@@ -2008,33 +2003,33 @@
/* Is coef updated at all */
if (update[1] == 0 || savings < 0) {
- vp10_write_bit(bc, 0);
+ aom_write_bit(bc, 0);
return;
}
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
// calc probs and branch cts for this frame only
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ const aom_prob upd = DIFF_UPDATE_PROB;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
else
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
if (s > 0 && newp != *oldp) u = 1;
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2054,17 +2049,17 @@
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
// calc probs and branch cts for this frame only
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
int s;
int u = 0;
if (t == PIVOT_NODE) {
- s = vp10_prob_diff_update_savings_search_model(
+ s = av1_prob_diff_update_savings_search_model(
frame_branch_ct[i][j][k][l][0],
old_coef_probs[i][j][k][l], &newp, upd, stepsize);
} else {
- s = vp10_prob_diff_update_savings_search(
+ s = av1_prob_diff_update_savings_search(
frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
}
@@ -2077,14 +2072,14 @@
if (u == 1 && updates == 1) {
int v;
// first update
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (v = 0; v < noupdates_before_first; ++v)
- vp10_write(bc, 0, upd);
+ aom_write(bc, 0, upd);
}
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2093,7 +2088,7 @@
}
}
if (updates == 0) {
- vp10_write_bit(bc, 0); // no updates
+ aom_write_bit(bc, 0); // no updates
}
return;
}
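Both passes of update_coef_probs_common() follow a measure-then-commit pattern: the first pass totals the rate each candidate probability update would save net of its one-per-node update flag, the second writes a single gating bit and, only when updating, the per-node flags and prob diffs. The per-node bookkeeping in the first pass is worth spelling out, since the flag cost is charged even for nodes that stay unchanged (the 0-flag must still be sent):

  /* Sketch of the pass-one tally (fixed-point scale of av1_cost_zero()
   * cancels out of the final savings-vs-zero comparison):
   *   updated node:   savings += s - av1_cost_zero(upd)   (s = bits saved)
   *   untouched node: savings -= av1_cost_zero(upd)       (flag still costs) */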
@@ -2104,8 +2099,8 @@
#if CONFIG_ENTROPY
// Calculate the token counts between subsequent subframe updates.
static void get_coef_counts_diff(
- VP10_COMP *cpi, int index,
- vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
+ AV1_COMP *cpi, int index,
+ av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES],
unsigned int eob_counts[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS]) {
int i, j, k, l, m, tx_size, val;
@@ -2151,11 +2146,11 @@
}
static void update_coef_probs_subframe(
- vp10_writer *const bc, VP10_COMP *cpi, TX_SIZE tx_size,
- vp10_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
- vp10_coeff_probs_model *new_coef_probs) {
- vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_writer *const bc, AV1_COMP *cpi, TX_SIZE tx_size,
+ av1_coeff_stats branch_ct[COEF_PROBS_BUFS][TX_SIZES][PLANE_TYPES],
+ av1_coeff_probs_model *new_coef_probs) {
+ av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+ const aom_prob upd = DIFF_UPDATE_PROB;
const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
@@ -2180,22 +2175,22 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- const vpx_prob oldp = old_coef_probs[i][j][k][l][t];
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ const aom_prob oldp = old_coef_probs[i][j][k][l][t];
int s, u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != oldp) u = 1;
if (u)
- savings += s - (int)(vp10_cost_zero(upd));
+ savings += s - (int)(av1_cost_zero(upd));
else
- savings -= (int)(vp10_cost_zero(upd));
+ savings -= (int)(av1_cost_zero(upd));
update[u]++;
}
}
@@ -2205,10 +2200,10 @@
/* Is coef updated at all */
if (update[1] == 0 || savings < 0) {
- vp10_write_bit(bc, 0);
+ aom_write_bit(bc, 0);
return;
}
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
@@ -2221,24 +2216,24 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
- const vpx_prob upd = DIFF_UPDATE_PROB;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ const aom_prob upd = DIFF_UPDATE_PROB;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != *oldp) u = 1;
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2264,18 +2259,18 @@
}
}
for (t = 0; t < entropy_nodes_update; ++t) {
- vpx_prob newp = new_coef_probs[i][j][k][l][t];
- vpx_prob *oldp = old_coef_probs[i][j][k][l] + t;
+ aom_prob newp = new_coef_probs[i][j][k][l][t];
+ aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
int s;
int u = 0;
if (t == PIVOT_NODE)
- s = vp10_prob_update_search_model_subframe(
+ s = av1_prob_update_search_model_subframe(
this_branch_ct, old_coef_probs[i][j][k][l], &newp, upd,
stepsize, max_idx);
else
- s = vp10_prob_update_search_subframe(this_branch_ct[t], *oldp,
- &newp, upd, max_idx);
+ s = av1_prob_update_search_subframe(this_branch_ct[t], *oldp,
+ &newp, upd, max_idx);
if (s > 0 && newp != *oldp) u = 1;
updates += u;
if (u == 0 && updates == 0) {
@@ -2285,14 +2280,14 @@
if (u == 1 && updates == 1) {
int v;
// first update
- vp10_write_bit(bc, 1);
+ aom_write_bit(bc, 1);
for (v = 0; v < noupdates_before_first; ++v)
- vp10_write(bc, 0, upd);
+ aom_write(bc, 0, upd);
}
- vp10_write(bc, u, upd);
+ aom_write(bc, u, upd);
if (u) {
/* send/use new probability */
- vp10_write_prob_diff_update(bc, newp, *oldp);
+ av1_write_prob_diff_update(bc, newp, *oldp);
*oldp = newp;
}
}
@@ -2301,7 +2296,7 @@
}
}
if (updates == 0) {
- vp10_write_bit(bc, 0); // no updates
+ aom_write_bit(bc, 0); // no updates
}
return;
}
@@ -2310,7 +2305,7 @@
}
#endif // CONFIG_ENTROPY
-static void update_coef_probs(VP10_COMP *cpi, vp10_writer *w) {
+static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
@@ -2318,17 +2313,17 @@
int update = 0;
#endif // CONFIG_ANS
#if CONFIG_ENTROPY
- VP10_COMMON *cm = &cpi->common;
+ AV1_COMMON *cm = &cpi->common;
SUBFRAME_STATS *subframe_stats = &cpi->subframe_stats;
unsigned int eob_counts_copy[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS];
int i;
- vp10_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
+ av1_coeff_probs_model dummy_frame_coef_probs[PLANE_TYPES];
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_copy(cpi->common.fc->coef_probs,
- subframe_stats->enc_starting_coef_probs);
+ av1_copy(cpi->common.fc->coef_probs,
+ subframe_stats->enc_starting_coef_probs);
for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
get_coef_counts_diff(cpi, i, cpi->wholeframe_stats.coef_counts_buf[i],
cpi->wholeframe_stats.eob_counts_buf[i]);
@@ -2337,32 +2332,32 @@
#endif // CONFIG_ENTROPY
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
- vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
- vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+ av1_coeff_stats frame_branch_ct[PLANE_TYPES];
+ av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
if (cpi->td.counts->tx_size_totals[tx_size] <= 20 ||
(tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
- vp10_write_bit(w, 0);
+ aom_write_bit(w, 0);
} else {
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
unsigned int
eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
- vp10_coeff_count coef_counts_copy[PLANE_TYPES];
- vp10_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
- vp10_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
+ av1_coeff_count coef_counts_copy[PLANE_TYPES];
+ av1_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
+ av1_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
build_tree_distribution(cpi, tx_size, frame_branch_ct,
frame_coef_probs);
for (i = 0; i <= cpi->common.coef_probs_update_idx; ++i) {
- vp10_copy(cpi->common.counts.eob_branch[tx_size],
- cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
- vp10_copy(cpi->td.rd_counts.coef_counts[tx_size],
- cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
+ av1_copy(cpi->common.counts.eob_branch[tx_size],
+ cpi->wholeframe_stats.eob_counts_buf[i][tx_size]);
+ av1_copy(cpi->td.rd_counts.coef_counts[tx_size],
+ cpi->wholeframe_stats.coef_counts_buf[i][tx_size]);
build_tree_distribution(cpi, tx_size, cpi->branch_ct_buf[i][tx_size],
dummy_frame_coef_probs);
}
- vp10_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
- vp10_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
+ av1_copy(cpi->common.counts.eob_branch[tx_size], eob_counts_copy);
+ av1_copy(cpi->td.rd_counts.coef_counts[tx_size], coef_counts_copy);
update_coef_probs_subframe(w, cpi, tx_size, cpi->branch_ct_buf,
frame_coef_probs);
@@ -2385,65 +2380,65 @@
}
#if CONFIG_ENTROPY
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
- vp10_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(subframe_stats->coef_probs_buf[0], cm->fc->coef_probs);
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
- vp10_copy(eob_counts_copy, cm->counts.eob_branch);
+ av1_copy(eob_counts_copy, cm->counts.eob_branch);
for (i = 1; i <= cpi->common.coef_probs_update_idx; ++i) {
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- vp10_full_to_model_counts(cm->counts.coef[tx_size],
- subframe_stats->coef_counts_buf[i][tx_size]);
- vp10_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
- vp10_partial_adapt_probs(cm, 0, 0);
- vp10_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
+ av1_full_to_model_counts(cm->counts.coef[tx_size],
+ subframe_stats->coef_counts_buf[i][tx_size]);
+ av1_copy(cm->counts.eob_branch, subframe_stats->eob_counts_buf[i]);
+ av1_partial_adapt_probs(cm, 0, 0);
+ av1_copy(subframe_stats->coef_probs_buf[i], cm->fc->coef_probs);
}
- vp10_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
- vp10_copy(cm->counts.eob_branch, eob_counts_copy);
+ av1_copy(cm->fc->coef_probs, subframe_stats->coef_probs_buf[0]);
+ av1_copy(cm->counts.eob_branch, eob_counts_copy);
}
#endif // CONFIG_ENTROPY
#if CONFIG_ANS
- if (update) vp10_coef_pareto_cdfs(cpi->common.fc);
+ if (update) av1_coef_pareto_cdfs(cpi->common.fc);
#endif // CONFIG_ANS
}
#if CONFIG_LOOP_RESTORATION
-static void encode_restoration(VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void encode_restoration(AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
int i;
RestorationInfo *rst = &cm->rst_info;
- vpx_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
+ aom_wb_write_bit(wb, rst->restoration_type != RESTORE_NONE);
if (rst->restoration_type != RESTORE_NONE) {
if (rst->restoration_type == RESTORE_BILATERAL) {
- vpx_wb_write_bit(wb, 1);
+ aom_wb_write_bit(wb, 1);
for (i = 0; i < cm->rst_internal.ntiles; ++i) {
if (rst->bilateral_level[i] >= 0) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_literal(wb, rst->bilateral_level[i],
- vp10_bilateral_level_bits(cm));
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_literal(wb, rst->bilateral_level[i],
+ av1_bilateral_level_bits(cm));
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
for (i = 0; i < cm->rst_internal.ntiles; ++i) {
if (rst->wiener_level[i]) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_literal(wb, rst->vfilter[i][0] - WIENER_FILT_TAP0_MINV,
WIENER_FILT_TAP0_BITS);
- vpx_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
+ aom_wb_write_literal(wb, rst->vfilter[i][1] - WIENER_FILT_TAP1_MINV,
WIENER_FILT_TAP1_BITS);
- vpx_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
+ aom_wb_write_literal(wb, rst->vfilter[i][2] - WIENER_FILT_TAP2_MINV,
WIENER_FILT_TAP2_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][0] - WIENER_FILT_TAP0_MINV,
WIENER_FILT_TAP0_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][1] - WIENER_FILT_TAP1_MINV,
WIENER_FILT_TAP1_BITS);
- vpx_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
+ aom_wb_write_literal(wb, rst->hfilter[i][2] - WIENER_FILT_TAP2_MINV,
WIENER_FILT_TAP2_BITS);
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
}
@@ -2451,39 +2446,38 @@
}
#endif // CONFIG_LOOP_RESTORATION
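encode_restoration() writes each Wiener tap as an unsigned offset from that tap's minimum, so the matching read is a literal plus the minimum. A decoder-side sketch, assuming aom_rb_read_bit()/aom_rb_read_literal() are the renamed reader counterparts (only the first vertical tap is shown):

  /* Sketch: invert the Wiener tap coding above for one tile. */
  if (aom_rb_read_bit(rb)) { /* this tile carries a Wiener filter */
    vfilter0 = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
               WIENER_FILT_TAP0_MINV;
    /* taps 1..2 and the three horizontal taps follow the same pattern */
  }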
-static void encode_loopfilter(VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void encode_loopfilter(AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
int i;
struct loopfilter *lf = &cm->lf;
// Encode the loop filter level and type
- vpx_wb_write_literal(wb, lf->filter_level, 6);
- vpx_wb_write_literal(wb, lf->sharpness_level, 3);
+ aom_wb_write_literal(wb, lf->filter_level, 6);
+ aom_wb_write_literal(wb, lf->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or
// ref frame (if they are enabled).
- vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled);
+ aom_wb_write_bit(wb, lf->mode_ref_delta_enabled);
if (lf->mode_ref_delta_enabled) {
- vpx_wb_write_bit(wb, lf->mode_ref_delta_update);
+ aom_wb_write_bit(wb, lf->mode_ref_delta_update);
if (lf->mode_ref_delta_update) {
for (i = 0; i < TOTAL_REFS_PER_FRAME; i++) {
const int delta = lf->ref_deltas[i];
const int changed = delta != lf->last_ref_deltas[i];
- vpx_wb_write_bit(wb, changed);
+ aom_wb_write_bit(wb, changed);
if (changed) {
lf->last_ref_deltas[i] = delta;
- vpx_wb_write_inv_signed_literal(wb, delta, 6);
+ aom_wb_write_inv_signed_literal(wb, delta, 6);
}
}
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
const int delta = lf->mode_deltas[i];
const int changed = delta != lf->last_mode_deltas[i];
- vpx_wb_write_bit(wb, changed);
+ aom_wb_write_bit(wb, changed);
if (changed) {
lf->last_mode_deltas[i] = delta;
- vpx_wb_write_inv_signed_literal(wb, delta, 6);
+ aom_wb_write_inv_signed_literal(wb, delta, 6);
}
}
}
@@ -2491,84 +2485,83 @@
}
#if CONFIG_CLPF
-static void encode_clpf(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->clpf, 1);
+static void encode_clpf(const AV1_COMMON *cm, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->clpf, 1);
}
#endif
#if CONFIG_DERING
-static void encode_dering(int level, struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, level, DERING_LEVEL_BITS);
+static void encode_dering(int level, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, level, DERING_LEVEL_BITS);
}
#endif // CONFIG_DERING
-static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) {
+static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
if (delta_q != 0) {
- vpx_wb_write_bit(wb, 1);
- vpx_wb_write_inv_signed_literal(wb, delta_q, 6);
+ aom_wb_write_bit(wb, 1);
+ aom_wb_write_inv_signed_literal(wb, delta_q, 6);
} else {
- vpx_wb_write_bit(wb, 0);
+ aom_wb_write_bit(wb, 0);
}
}
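write_delta_q() uses a presence bit: a zero delta costs one bit, a non-zero delta costs one bit plus whatever aom_wb_write_inv_signed_literal() emits for six magnitude bits (the same helper carries the loop filter deltas above). Assuming the classic magnitude-then-sign layout of that helper (some branches encode a (bits+1)-wide two's-complement literal instead), the inverse reads as:

  /* Sketch: read back a delta written by write_delta_q(), assuming the
   * magnitude-plus-sign form of the inv-signed literal. */
  static int read_delta_q_sketch(struct aom_read_bit_buffer *rb) {
    if (!aom_rb_read_bit(rb)) return 0;             /* delta_q == 0 */
    const int abs_val = aom_rb_read_literal(rb, 6); /* magnitude */
    return aom_rb_read_bit(rb) ? -abs_val : abs_val;
  }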
-static void encode_quantization(const VP10_COMMON *const cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
+static void encode_quantization(const AV1_COMMON *const cm,
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
write_delta_q(wb, cm->y_dc_delta_q);
write_delta_q(wb, cm->uv_dc_delta_q);
write_delta_q(wb, cm->uv_ac_delta_q);
#if CONFIG_AOM_QM
- vpx_wb_write_bit(wb, cm->using_qmatrix);
+ aom_wb_write_bit(wb, cm->using_qmatrix);
if (cm->using_qmatrix) {
- vpx_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
- vpx_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
+ aom_wb_write_literal(wb, cm->min_qmlevel, QM_LEVEL_BITS);
+ aom_wb_write_literal(wb, cm->max_qmlevel, QM_LEVEL_BITS);
}
#endif
}
-static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
- struct vpx_write_bit_buffer *wb) {
+static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
+ struct aom_write_bit_buffer *wb) {
int i, j;
const struct segmentation *seg = &cm->seg;
- vpx_wb_write_bit(wb, seg->enabled);
+ aom_wb_write_bit(wb, seg->enabled);
if (!seg->enabled) return;
// Segmentation map
if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
- vpx_wb_write_bit(wb, seg->update_map);
+ aom_wb_write_bit(wb, seg->update_map);
} else {
assert(seg->update_map == 1);
}
if (seg->update_map) {
// Select the coding strategy (temporal or spatial)
- vp10_choose_segmap_coding_method(cm, xd);
+ av1_choose_segmap_coding_method(cm, xd);
// Write out the chosen coding method.
if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) {
- vpx_wb_write_bit(wb, seg->temporal_update);
+ aom_wb_write_bit(wb, seg->temporal_update);
} else {
assert(seg->temporal_update == 0);
}
}
// Segmentation data
- vpx_wb_write_bit(wb, seg->update_data);
+ aom_wb_write_bit(wb, seg->update_data);
if (seg->update_data) {
- vpx_wb_write_bit(wb, seg->abs_delta);
+ aom_wb_write_bit(wb, seg->abs_delta);
for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
const int active = segfeature_active(seg, i, j);
- vpx_wb_write_bit(wb, active);
+ aom_wb_write_bit(wb, active);
if (active) {
const int data = get_segdata(seg, i, j);
- const int data_max = vp10_seg_feature_data_max(j);
+ const int data_max = av1_seg_feature_data_max(j);
- if (vp10_is_segfeature_signed(j)) {
+ if (av1_is_segfeature_signed(j)) {
encode_unsigned_max(wb, abs(data), data_max);
- vpx_wb_write_bit(wb, data < 0);
+ aom_wb_write_bit(wb, data < 0);
} else {
encode_unsigned_max(wb, data, data_max);
}
@@ -2578,8 +2571,8 @@
}
}
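In encode_segmentation(), signed feature data is sent magnitude first with a trailing sign bit, and encode_unsigned_max() (defined earlier in this file) sizes the magnitude literal from the feature's maximum. A decoder-side sketch, assuming a get_unsigned_bits()-style helper behind encode_unsigned_max():

  /* Sketch: inverse of the per-feature data coding above. */
  int data = aom_rb_read_literal(rb, get_unsigned_bits(data_max));
  if (av1_is_segfeature_signed(j) && aom_rb_read_bit(rb)) data = -data;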
-static void update_seg_probs(VP10_COMP *cpi, vp10_writer *w) {
- VP10_COMMON *cm = &cpi->common;
+static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
+ AV1_COMMON *cm = &cpi->common;
if (!cm->seg.enabled || !cm->seg.update_map) return;
@@ -2587,41 +2580,41 @@
int i;
for (i = 0; i < PREDICTION_PROBS; i++)
- vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
- cm->counts.seg.pred[i]);
+ av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+ cm->counts.seg.pred[i]);
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
} else {
- prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+ prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
cm->counts.seg.tree_total, MAX_SEGMENTS, w);
}
}
-static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_bit(wb, mode == TX_MODE_SELECT);
- if (mode != TX_MODE_SELECT) vpx_wb_write_literal(wb, mode, 2);
+static void write_txfm_mode(TX_MODE mode, struct aom_write_bit_buffer *wb) {
+ aom_wb_write_bit(wb, mode == TX_MODE_SELECT);
+ if (mode != TX_MODE_SELECT) aom_wb_write_literal(wb, mode, 2);
}
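write_txfm_mode() sends the TX_MODE_SELECT flag first, so the 2-bit literal only ever has to cover the four fixed modes below it. The inverse follows directly:

  /* Sketch: invert write_txfm_mode(). */
  static TX_MODE read_txfm_mode_sketch(struct aom_read_bit_buffer *rb) {
    return aom_rb_read_bit(rb) ? TX_MODE_SELECT
                               : (TX_MODE)aom_rb_read_literal(rb, 2);
  }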
-static void update_txfm_probs(VP10_COMMON *cm, vp10_writer *w,
+static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
if (cm->tx_mode == TX_MODE_SELECT) {
int i, j;
for (i = 0; i < TX_SIZES - 1; ++i)
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
- prob_diff_update(vp10_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
+ prob_diff_update(av1_tx_size_tree[i], cm->fc->tx_size_probs[i][j],
counts->tx_size[i][j], i + 2, w);
}
}
static void write_interp_filter(INTERP_FILTER filter,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_bit(wb, filter == SWITCHABLE);
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_bit(wb, filter == SWITCHABLE);
if (filter != SWITCHABLE)
- vpx_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
+ aom_wb_write_literal(wb, filter, 2 + CONFIG_EXT_INTERP);
}
-static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
if (cm->interp_filter == SWITCHABLE) {
// Check to see if only one of the filters is actually used
int count[SWITCHABLE_FILTERS];
@@ -2644,8 +2637,8 @@
}
}
-static void write_tile_info(const VP10_COMMON *const cm,
- struct vpx_write_bit_buffer *wb) {
+static void write_tile_info(const AV1_COMMON *const cm,
+ struct aom_write_bit_buffer *wb) {
#if CONFIG_EXT_TILE
const int tile_width =
ALIGN_POWER_OF_TWO(cm->tile_width, cm->mib_size_log2) >>
@@ -2662,33 +2655,33 @@
if (cm->sb_size == BLOCK_128X128) {
assert(tile_width <= 32);
assert(tile_height <= 32);
- vpx_wb_write_literal(wb, tile_width - 1, 5);
- vpx_wb_write_literal(wb, tile_height - 1, 5);
+ aom_wb_write_literal(wb, tile_width - 1, 5);
+ aom_wb_write_literal(wb, tile_height - 1, 5);
} else
#endif // CONFIG_EXT_PARTITION
{
assert(tile_width <= 64);
assert(tile_height <= 64);
- vpx_wb_write_literal(wb, tile_width - 1, 6);
- vpx_wb_write_literal(wb, tile_height - 1, 6);
+ aom_wb_write_literal(wb, tile_width - 1, 6);
+ aom_wb_write_literal(wb, tile_height - 1, 6);
}
#else
int min_log2_tile_cols, max_log2_tile_cols, ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
ones = cm->log2_tile_cols - min_log2_tile_cols;
- while (ones--) vpx_wb_write_bit(wb, 1);
+ while (ones--) aom_wb_write_bit(wb, 1);
- if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0);
+ if (cm->log2_tile_cols < max_log2_tile_cols) aom_wb_write_bit(wb, 0);
// rows
- vpx_wb_write_bit(wb, cm->log2_tile_rows != 0);
- if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1);
+ aom_wb_write_bit(wb, cm->log2_tile_rows != 0);
+ if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
#endif // CONFIG_EXT_TILE
}
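In the non-EXT_TILE path, the tile-column count is coded as a truncated unary run of 1-bits on top of min_log2_tile_cols (the terminating 0 is omitted once the maximum is reached), and log2_tile_rows in {0, 1, 2} takes one or two bits. A decoder-side sketch, with min/max obtained from av1_get_tile_n_bits() as in the writer:

  /* Sketch: invert the tile info coding above. */
  int log2_cols = min_log2_tile_cols;
  while (log2_cols < max_log2_tile_cols && aom_rb_read_bit(rb)) ++log2_cols;
  const int log2_rows = aom_rb_read_bit(rb) ? 1 + aom_rb_read_bit(rb) : 0;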
-static int get_refresh_mask(VP10_COMP *cpi) {
+static int get_refresh_mask(AV1_COMP *cpi) {
int refresh_mask = 0;
#if CONFIG_EXT_REFS
@@ -2712,12 +2705,12 @@
refresh_mask |= (cpi->refresh_last_frame << cpi->lst_fb_idx);
#endif // CONFIG_EXT_REFS
- if (vp10_preserve_existing_gf(cpi)) {
+ if (av1_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// new ARF frame. However, in the short term we leave it in the GF slot and,
// if we're updating the GF with the current decoded frame, we save it
// instead to the ARF slot.
- // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+ // Later, in av1/encoder/encoder.c:av1_update_reference_frames() we
// will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
// there so that it can be done outside of the recode loop.
// Note: This is highly specific to the use of ARF as a forward reference,
@@ -2793,14 +2786,14 @@
}
#endif // CONFIG_EXT_TILE
-static uint32_t write_tiles(VP10_COMP *const cpi, uint8_t *const dst,
+static uint32_t write_tiles(AV1_COMP *const cpi, uint8_t *const dst,
unsigned int *max_tile_size,
unsigned int *max_tile_col_size) {
- const VP10_COMMON *const cm = &cpi->common;
+ const AV1_COMMON *const cm = &cpi->common;
#if CONFIG_ANS
struct AnsCoder token_ans;
#else
- vp10_writer mode_bc;
+ aom_writer mode_bc;
#endif // CONFIG_ANS
int tile_row, tile_col;
TOKENEXTRA *(*const tok_buffers)[MAX_TILE_COLS] = cpi->tile_tok;
@@ -2827,7 +2820,7 @@
const int is_last_col = (tile_col == tile_cols - 1);
const size_t col_offset = total_size;
- vp10_tile_set_col(&tile_info, cm, tile_col);
+ av1_tile_set_col(&tile_info, cm, tile_col);
// The last column does not have a column header
if (!is_last_col) total_size += 4;
@@ -2839,7 +2832,7 @@
const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
const int data_offset = have_tiles ? 4 : 0;
- vp10_tile_set_row(&tile_info, cm, tile_row);
+ av1_tile_set_row(&tile_info, cm, tile_row);
buf->data = dst + total_size;
@@ -2847,10 +2840,10 @@
// even for the last one, unless no tiling is used at all.
total_size += data_offset;
#if !CONFIG_ANS
- vpx_start_encode(&mode_bc, buf->data + data_offset);
+ aom_start_encode(&mode_bc, buf->data + data_offset);
write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
assert(tok == tok_end);
- vpx_stop_encode(&mode_bc);
+ aom_stop_encode(&mode_bc);
tile_size = mode_bc.pos;
#else
buf_ans_write_reset(buf_ans);
@@ -2864,7 +2857,7 @@
buf->size = tile_size;
// Record the maximum tile size we see, so we can compact headers later.
- *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+ *max_tile_size = AOMMAX(*max_tile_size, tile_size);
if (have_tiles) {
// tile header: size of this tile, or copy offset
@@ -2896,7 +2889,7 @@
// If it is not final packing, record the maximum tile column size we see,
// otherwise, check if the tile size is out of the range.
- *max_tile_col_size = VPXMAX(*max_tile_col_size, col_size);
+ *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size);
}
}
#else
@@ -2904,7 +2897,7 @@
TileInfo tile_info;
const int is_last_row = (tile_row == tile_rows - 1);
- vp10_tile_set_row(&tile_info, cm, tile_row);
+ av1_tile_set_row(&tile_info, cm, tile_row);
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col];
@@ -2914,7 +2907,7 @@
const TOKENEXTRA *tok = tok_buffers[tile_row][tile_col];
const TOKENEXTRA *tok_end = tok + cpi->tok_count[tile_row][tile_col];
- vp10_tile_set_col(&tile_info, cm, tile_col);
+ av1_tile_set_col(&tile_info, cm, tile_col);
buf->data = dst + total_size;
@@ -2922,10 +2915,10 @@
if (!is_last_tile) total_size += 4;
#if !CONFIG_ANS
- vpx_start_encode(&mode_bc, dst + total_size);
+ aom_start_encode(&mode_bc, dst + total_size);
write_modes(cpi, &tile_info, &mode_bc, &tok, tok_end);
assert(tok == tok_end);
- vpx_stop_encode(&mode_bc);
+ aom_stop_encode(&mode_bc);
tile_size = mode_bc.pos;
#else
buf_ans_write_reset(buf_ans);
@@ -2941,7 +2934,7 @@
buf->size = tile_size;
if (!is_last_tile) {
- *max_tile_size = VPXMAX(*max_tile_size, tile_size);
+ *max_tile_size = AOMMAX(*max_tile_size, tile_size);
// size of this tile
mem_put_le32(buf->data, tile_size);
}
@@ -2953,28 +2946,28 @@
return (uint32_t)total_size;
}
-static void write_render_size(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
+static void write_render_size(const AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
const int scaling_active =
cm->width != cm->render_width || cm->height != cm->render_height;
- vpx_wb_write_bit(wb, scaling_active);
+ aom_wb_write_bit(wb, scaling_active);
if (scaling_active) {
- vpx_wb_write_literal(wb, cm->render_width - 1, 16);
- vpx_wb_write_literal(wb, cm->render_height - 1, 16);
+ aom_wb_write_literal(wb, cm->render_width - 1, 16);
+ aom_wb_write_literal(wb, cm->render_height - 1, 16);
}
}
-static void write_frame_size(const VP10_COMMON *cm,
- struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, cm->width - 1, 16);
- vpx_wb_write_literal(wb, cm->height - 1, 16);
+static void write_frame_size(const AV1_COMMON *cm,
+ struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, cm->width - 1, 16);
+ aom_wb_write_literal(wb, cm->height - 1, 16);
write_render_size(cm, wb);
}
-static void write_frame_size_with_refs(VP10_COMP *cpi,
- struct vpx_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_frame_size_with_refs(AV1_COMP *cpi,
+ struct aom_write_bit_buffer *wb) {
+ AV1_COMMON *const cm = &cpi->common;
int found = 0;
MV_REFERENCE_FRAME ref_frame;
@@ -2987,66 +2980,66 @@
found &= cm->render_width == cfg->render_width &&
cm->render_height == cfg->render_height;
}
- vpx_wb_write_bit(wb, found);
+ aom_wb_write_bit(wb, found);
if (found) {
break;
}
}
if (!found) {
- vpx_wb_write_literal(wb, cm->width - 1, 16);
- vpx_wb_write_literal(wb, cm->height - 1, 16);
+ aom_wb_write_literal(wb, cm->width - 1, 16);
+ aom_wb_write_literal(wb, cm->height - 1, 16);
write_render_size(cm, wb);
}
}
-static void write_sync_code(struct vpx_write_bit_buffer *wb) {
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
- vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+static void write_sync_code(struct aom_write_bit_buffer *wb) {
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
+ aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
}
static void write_profile(BITSTREAM_PROFILE profile,
- struct vpx_write_bit_buffer *wb) {
+ struct aom_write_bit_buffer *wb) {
switch (profile) {
- case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break;
- case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break;
- case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break;
- case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break;
+ case PROFILE_0: aom_wb_write_literal(wb, 0, 2); break;
+ case PROFILE_1: aom_wb_write_literal(wb, 2, 2); break;
+ case PROFILE_2: aom_wb_write_literal(wb, 1, 2); break;
+ case PROFILE_3: aom_wb_write_literal(wb, 6, 3); break;
default: assert(0);
}
}
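The profile literals look odd (0, 2, 1, 6) because aom_wb_write_literal() puts bits on the wire MSB first while the profile number is transmitted low bit first; profile 3 additionally carries a trailing reserved 0 bit. The standard inverse makes the mapping clear:

  /* Sketch: invert write_profile(); the first two wire bits are the low
   * and high bits of the profile number, in that order. */
  static BITSTREAM_PROFILE read_profile_sketch(struct aom_read_bit_buffer *rb) {
    int profile = aom_rb_read_bit(rb);               /* low bit */
    profile |= aom_rb_read_bit(rb) << 1;             /* high bit */
    if (profile > 2) profile += aom_rb_read_bit(rb); /* reserved bit */
    return (BITSTREAM_PROFILE)profile;
  }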
static void write_bitdepth_colorspace_sampling(
- VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) {
+ AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
if (cm->profile >= PROFILE_2) {
- assert(cm->bit_depth > VPX_BITS_8);
- vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
+ assert(cm->bit_depth > AOM_BITS_8);
+ aom_wb_write_bit(wb, cm->bit_depth == AOM_BITS_10 ? 0 : 1);
}
- vpx_wb_write_literal(wb, cm->color_space, 3);
- if (cm->color_space != VPX_CS_SRGB) {
+ aom_wb_write_literal(wb, cm->color_space, 3);
+ if (cm->color_space != AOM_CS_SRGB) {
// 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
- vpx_wb_write_bit(wb, cm->color_range);
+ aom_wb_write_bit(wb, cm->color_range);
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
- vpx_wb_write_bit(wb, cm->subsampling_x);
- vpx_wb_write_bit(wb, cm->subsampling_y);
- vpx_wb_write_bit(wb, 0); // unused
+ aom_wb_write_bit(wb, cm->subsampling_x);
+ aom_wb_write_bit(wb, cm->subsampling_y);
+ aom_wb_write_bit(wb, 0); // unused
} else {
assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
}
} else {
assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
- vpx_wb_write_bit(wb, 0); // unused
+ aom_wb_write_bit(wb, 0); // unused
}
}
-static void write_uncompressed_header(VP10_COMP *cpi,
- struct vpx_write_bit_buffer *wb) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_uncompressed_header(AV1_COMP *cpi,
+ struct aom_write_bit_buffer *wb) {
+ AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
- vpx_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
+ aom_wb_write_literal(wb, AOM_FRAME_MARKER, 2);
write_profile(cm->profile, wb);
@@ -3059,45 +3052,45 @@
const int frame_to_show = cm->ref_frame_map[cpi->existing_fb_idx_to_show];
if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a reconstructed frame",
frame_to_show);
}
ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
- vpx_wb_write_bit(wb, 1); // show_existing_frame
- vpx_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
+ aom_wb_write_bit(wb, 1); // show_existing_frame
+ aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3);
return;
} else {
#endif // CONFIG_EXT_REFS
- vpx_wb_write_bit(wb, 0); // show_existing_frame
+ aom_wb_write_bit(wb, 0); // show_existing_frame
#if CONFIG_EXT_REFS
}
#endif // CONFIG_EXT_REFS
- vpx_wb_write_bit(wb, cm->frame_type);
- vpx_wb_write_bit(wb, cm->show_frame);
- vpx_wb_write_bit(wb, cm->error_resilient_mode);
+ aom_wb_write_bit(wb, cm->frame_type);
+ aom_wb_write_bit(wb, cm->show_frame);
+ aom_wb_write_bit(wb, cm->error_resilient_mode);
if (cm->frame_type == KEY_FRAME) {
write_sync_code(wb);
write_bitdepth_colorspace_sampling(cm, wb);
write_frame_size(cm, wb);
if (frame_is_intra_only(cm))
- vpx_wb_write_bit(wb, cm->allow_screen_content_tools);
+ aom_wb_write_bit(wb, cm->allow_screen_content_tools);
} else {
- if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only);
+ if (!cm->show_frame) aom_wb_write_bit(wb, cm->intra_only);
if (!cm->error_resilient_mode) {
if (cm->intra_only) {
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
} else {
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE);
if (cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE)
- vpx_wb_write_bit(wb,
+ aom_wb_write_bit(wb,
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL);
}
}
@@ -3111,18 +3104,18 @@
write_bitdepth_colorspace_sampling(cm, wb);
#if CONFIG_EXT_REFS
- vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+ aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
- vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+ aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif // CONFIG_EXT_REFS
write_frame_size(cm, wb);
} else {
MV_REFERENCE_FRAME ref_frame;
#if CONFIG_EXT_REFS
- vpx_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
+ aom_wb_write_literal(wb, cpi->refresh_frame_mask, REF_FRAMES);
#else
- vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
+ aom_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
#endif // CONFIG_EXT_REFS
#if CONFIG_EXT_REFS
@@ -3135,14 +3128,14 @@
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX);
- vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
+ aom_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame),
REF_FRAMES_LOG2);
- vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
+ aom_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
}
write_frame_size_with_refs(cpi, wb);
- vpx_wb_write_bit(wb, cm->allow_high_precision_mv);
+ aom_wb_write_bit(wb, cm->allow_high_precision_mv);
fix_interp_filter(cm, cpi->td.counts);
write_interp_filter(cm->interp_filter, wb);
@@ -3150,17 +3143,17 @@
}
if (!cm->error_resilient_mode) {
- vpx_wb_write_bit(
+ aom_wb_write_bit(
wb, cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD);
}
- vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
+ aom_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
assert(cm->mib_size == num_8x8_blocks_wide_lookup[cm->sb_size]);
assert(cm->mib_size == 1 << cm->mib_size_log2);
#if CONFIG_EXT_PARTITION
assert(cm->sb_size == BLOCK_128X128 || cm->sb_size == BLOCK_64X64);
- vpx_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
+ aom_wb_write_bit(wb, cm->sb_size == BLOCK_128X128 ? 1 : 0);
#else
assert(cm->sb_size == BLOCK_64X64);
#endif // CONFIG_EXT_PARTITION
@@ -3186,8 +3179,8 @@
const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
- vpx_wb_write_bit(wb, use_hybrid_pred);
- if (!use_hybrid_pred) vpx_wb_write_bit(wb, use_compound_pred);
+ aom_wb_write_bit(wb, use_hybrid_pred);
+ if (!use_hybrid_pred) aom_wb_write_bit(wb, use_compound_pred);
}
write_tile_info(cm, wb);
@@ -3195,35 +3188,35 @@
#if CONFIG_GLOBAL_MOTION
static void write_global_motion_params(Global_Motion_Params *params,
- vpx_prob *probs, vp10_writer *w) {
+ aom_prob *probs, aom_writer *w) {
GLOBAL_MOTION_TYPE gmtype = get_gmtype(params);
- vp10_write_token(w, vp10_global_motion_types_tree, probs,
- &global_motion_types_encodings[gmtype]);
+ av1_write_token(w, av1_global_motion_types_tree, probs,
+ &global_motion_types_encodings[gmtype]);
switch (gmtype) {
case GLOBAL_ZERO: break;
case GLOBAL_AFFINE:
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, params->motion_params.wmmat[4] >> GM_ALPHA_PREC_DIFF,
GM_ABS_ALPHA_BITS);
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, (params->motion_params.wmmat[5] >> GM_ALPHA_PREC_DIFF) -
(1 << GM_ALPHA_PREC_BITS),
GM_ABS_ALPHA_BITS);
// fallthrough intended
case GLOBAL_ROTZOOM:
- vp10_write_primitive_symmetric(
+ av1_write_primitive_symmetric(
w, (params->motion_params.wmmat[2] >> GM_ALPHA_PREC_DIFF) -
(1 << GM_ALPHA_PREC_BITS),
GM_ABS_ALPHA_BITS);
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[3] >> GM_ALPHA_PREC_DIFF,
GM_ABS_ALPHA_BITS);
// fallthrough intended
case GLOBAL_TRANSLATION:
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[0] >> GM_TRANS_PREC_DIFF,
GM_ABS_TRANS_BITS);
- vp10_write_primitive_symmetric(
+ aom_write_primitive_symmetric(
w, params->motion_params.wmmat[1] >> GM_TRANS_PREC_DIFF,
GM_ABS_TRANS_BITS);
break;
@@ -3231,8 +3224,8 @@
}
}
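The switch in write_global_motion_params() falls through on purpose: each motion model emits only the parameters the next-simpler model lacks, so the wire payloads nest. For reference:

  /* Parameters emitted per model via the intended fallthrough above:
   *   GLOBAL_ZERO:        none
   *   GLOBAL_TRANSLATION: wmmat[0], wmmat[1]
   *   GLOBAL_ROTZOOM:     wmmat[2], wmmat[3], then the translation pair
   *   GLOBAL_AFFINE:      wmmat[4], wmmat[5], then rotzoom + translation
   * Each value is sent with av1_write_primitive_symmetric() at reduced
   * precision (the GM_*_PREC_DIFF shifts above). */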
-static void write_global_motion(VP10_COMP *cpi, vp10_writer *w) {
- VP10_COMMON *const cm = &cpi->common;
+static void write_global_motion(AV1_COMP *cpi, aom_writer *w) {
+ AV1_COMMON *const cm = &cpi->common;
int frame;
for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
if (!cpi->global_motion_used[frame]) {
@@ -3244,14 +3237,14 @@
}
#endif
-static uint32_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
- VP10_COMMON *const cm = &cpi->common;
+static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
+ AV1_COMMON *const cm = &cpi->common;
#if CONFIG_SUPERTX
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
#endif // CONFIG_SUPERTX
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = cpi->td.counts;
- vp10_writer *header_bc;
+ aom_writer *header_bc;
int i, j;
#if CONFIG_ANS
@@ -3260,9 +3253,9 @@
header_bc = &cpi->buf_ans;
buf_ans_write_reset(header_bc);
#else
- vp10_writer real_header_bc;
+ aom_writer real_header_bc;
header_bc = &real_header_bc;
- vpx_start_encode(header_bc, data);
+ aom_start_encode(header_bc, data);
#endif
update_txfm_probs(cm, header_bc, counts);
update_coef_probs(cpi, header_bc);
@@ -3275,39 +3268,39 @@
update_seg_probs(cpi, header_bc);
for (i = 0; i < INTRA_MODES; ++i)
- prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
counts->uv_mode[i], INTRA_MODES, header_bc);
#if CONFIG_EXT_PARTITION_TYPES
- prob_diff_update(vp10_partition_tree, fc->partition_prob[0],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[0],
counts->partition[0], PARTITION_TYPES, header_bc);
for (i = 1; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_ext_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_ext_partition_tree, fc->partition_prob[i],
counts->partition[i], EXT_PARTITION_TYPES, header_bc);
#else
for (i = 0; i < PARTITION_CONTEXTS; ++i)
- prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+ prob_diff_update(av1_partition_tree, fc->partition_prob[i],
counts->partition[i], PARTITION_TYPES, header_bc);
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
- prob_diff_update(vp10_intra_filter_tree, fc->intra_filter_probs[i],
+ prob_diff_update(av1_intra_filter_tree, fc->intra_filter_probs[i],
counts->intra_filter[i], INTRA_FILTERS, header_bc);
#endif // CONFIG_EXT_INTRA
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j)
- prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+ prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
counts->kf_y_mode[i][j], INTRA_MODES, header_bc);
} else {
#if CONFIG_REF_MV
update_inter_mode_probs(cm, header_bc, counts);
#else
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
- prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+ prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, header_bc);
#endif
@@ -3317,32 +3310,32 @@
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
if (is_interintra_allowed_bsize_group(i)) {
- vp10_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
- cm->counts.interintra[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->interintra_prob[i],
+ cm->counts.interintra[i]);
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
prob_diff_update(
- vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
+ av1_interintra_mode_tree, cm->fc->interintra_mode_prob[i],
counts->interintra_mode[i], INTERINTRA_MODES, header_bc);
}
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i))
- vp10_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
- cm->counts.wedge_interintra[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->wedge_interintra_prob[i],
+ cm->counts.wedge_interintra[i]);
}
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++)
if (is_interinter_wedge_used(i))
- vp10_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
- cm->counts.wedge_interinter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->wedge_interinter_prob[i],
+ cm->counts.wedge_interinter[i]);
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
- prob_diff_update(vp10_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
+ prob_diff_update(av1_motvar_tree, fc->motvar_prob[i], counts->motvar[i],
MOTION_VARIATIONS, header_bc);
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
@@ -3350,22 +3343,22 @@
update_switchable_interp_probs(cm, header_bc, counts);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
- counts->intra_inter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
+ counts->intra_inter[i]);
if (cpi->allow_comp_inter_inter) {
const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;
if (use_hybrid_pred)
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- vp10_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
- counts->comp_inter[i]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_inter_prob[i],
+ counts->comp_inter[i]);
}
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; i++) {
for (j = 0; j < (SINGLE_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
- counts->single_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
+ counts->single_ref[i][j]);
}
}
}
@@ -3374,31 +3367,31 @@
for (i = 0; i < REF_CONTEXTS; i++) {
#if CONFIG_EXT_REFS
for (j = 0; j < (FWD_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
- counts->comp_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+ counts->comp_ref[i][j]);
}
for (j = 0; j < (BWD_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
- counts->comp_bwdref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_bwdref_prob[i][j],
+ counts->comp_bwdref[i][j]);
}
#else
for (j = 0; j < (COMP_REFS - 1); j++) {
- vp10_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
- counts->comp_ref[i][j]);
+ av1_cond_prob_diff_update(header_bc, &fc->comp_ref_prob[i][j],
+ counts->comp_ref[i][j]);
}
#endif // CONFIG_EXT_REFS
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
- prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+ prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, header_bc);
- vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
+ av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
#if CONFIG_REF_MV
- counts->mv);
+ counts->mv);
#else
- &counts->mv);
+ &counts->mv);
#endif
update_ext_tx_probs(cm, header_bc);
#if CONFIG_SUPERTX
@@ -3415,7 +3408,7 @@
assert(header_size <= 0xffff);
return header_size;
#else
- vpx_stop_encode(header_bc);
+ aom_stop_encode(header_bc);
assert(header_bc->pos <= 0xffff);
return header_bc->pos;
#endif // CONFIG_ANS
@@ -3451,7 +3444,7 @@
}
}
-static int remux_tiles(const VP10_COMMON *const cm, uint8_t *dst,
+static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
const uint32_t data_size, const uint32_t max_tile_size,
const uint32_t max_tile_col_size,
int *const tile_size_bytes,
@@ -3549,19 +3542,19 @@
}
}
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t *size) {
uint8_t *data = dst;
uint32_t compressed_header_size;
uint32_t uncompressed_header_size;
uint32_t data_size;
- struct vpx_write_bit_buffer wb = { data, 0 };
- struct vpx_write_bit_buffer saved_wb;
+ struct aom_write_bit_buffer wb = { data, 0 };
+ struct aom_write_bit_buffer saved_wb;
unsigned int max_tile_size;
unsigned int max_tile_col_size;
int tile_size_bytes;
int tile_col_size_bytes;
- VP10_COMMON *const cm = &cpi->common;
+ AV1_COMMON *const cm = &cpi->common;
const int have_tiles = cm->tile_cols * cm->tile_rows > 1;
#if CONFIG_BITSTREAM_DEBUG
@@ -3573,7 +3566,7 @@
#if CONFIG_EXT_REFS
if (cm->show_existing_frame) {
- *size = vpx_wb_bytes_written(&wb);
+ *size = aom_wb_bytes_written(&wb);
return;
}
#endif // CONFIG_EXT_REFS
@@ -3586,18 +3579,18 @@
// describing tile configuration.
#if CONFIG_EXT_TILE
// Number of bytes in tile column size - 1
- vpx_wb_write_literal(&wb, 0, 2);
+ aom_wb_write_literal(&wb, 0, 2);
#endif // CONFIG_EXT_TILE
// Number of bytes in tile size - 1
- vpx_wb_write_literal(&wb, 0, 2);
+ aom_wb_write_literal(&wb, 0, 2);
}
// Size of compressed header
- vpx_wb_write_literal(&wb, 0, 16);
+ aom_wb_write_literal(&wb, 0, 16);
- uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
+ uncompressed_header_size = (uint32_t)aom_wb_bytes_written(&wb);
data += uncompressed_header_size;
- vpx_clear_system_state();
+ aom_clear_system_state();
// Write the compressed header
compressed_header_size = write_compressed_header(cpi, data);
@@ -3618,14 +3611,14 @@
if (have_tiles) {
#if CONFIG_EXT_TILE
assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4);
- vpx_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
+ aom_wb_write_literal(&saved_wb, tile_col_size_bytes - 1, 2);
#endif // CONFIG_EXT_TILE
assert(tile_size_bytes >= 1 && tile_size_bytes <= 4);
- vpx_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
+ aom_wb_write_literal(&saved_wb, tile_size_bytes - 1, 2);
}
// TODO(jbb): Figure out what to do if compressed_header_size > 16 bits.
assert(compressed_header_size <= 0xffff);
- vpx_wb_write_literal(&saved_wb, compressed_header_size, 16);
+ aom_wb_write_literal(&saved_wb, compressed_header_size, 16);
*size = data - dst;
}
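av1_pack_bitstream() relies on a placeholder-then-patch idiom: the tile-size-bytes fields and the 16-bit compressed-header size are first written as zeros, the bit position is remembered in saved_wb, and the real values are written into the same positions once known. A condensed sketch of the idiom (field names illustrative):

  /* Sketch: reserve a fixed-width field, patch it after the fact. */
  struct aom_write_bit_buffer saved_wb = wb; /* remember patch position */
  aom_wb_write_literal(&wb, 0, 16);          /* zero placeholder */
  /* ... emit the payload and compute its actual size ... */
  aom_wb_write_literal(&saved_wb, size, 16); /* safe: placeholder bits were 0 */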