Port renaming changes from AOMedia
Cherry-picked the following commits:
0defd8f Changed "WebM" to "AOMedia" & "webm" to "aomedia"
54e6676 Replace "VPx" by "AVx"
5082a36 Change "Vpx" to "Avx"
7df44f1 Replace "Vp9" w/ "Av1"
967f722 Remove kVp9CodecId
828f30c Change "Vp8" to "AOM"
030b5ff AUTHORS regenerated
2524cae Add ref-mv experimental flag
016762b Change copyright notice to AOMedia form
81e5526 Replace vp9 w/ av1
9b94565 Add missing files
fa8ca9f Change "vp9" to "av1"
ec838b7 Convert "vp8" to "aom"
80edfa0 Change "VP9" to "AV1"
d1a11fb Change "vp8" to "aom"
7b58251 Point to WebM test data
dd1a5c8 Replace "VP8" with "AOM"
ff00fc0 Change "VPX" to "AOM"
01dee0b Change "vp10" to "av1" in source code
cebe6f0 Convert "vpx" to "aom"
17b0567 rename vp10*.mk to av1_*.mk
fe5f8a8 rename files vp10_* to av1_*
Change-Id: I6fc3d18eb11fc171e46140c836ad5339cf6c9419
diff --git a/av1/decoder/bitreader.h b/av1/decoder/bitreader.h
index 75d6aa4..aaf1bb8 100644
--- a/av1/decoder/bitreader.h
+++ b/av1/decoder/bitreader.h
@@ -11,28 +11,28 @@
/* The purpose of this header is to provide compile time pluggable bit reader
* implementations with a common interface. */
-#ifndef VPX10_DECODER_BITREADER_H_
-#define VPX10_DECODER_BITREADER_H_
+#ifndef AOM10_DECODER_BITREADER_H_
+#define AOM10_DECODER_BITREADER_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
#if CONFIG_ANS
#include "av1/common/ans.h"
-#include "aom/vp8dx.h" // for vp10_decrypt_cb
-#define vp10_reader struct AnsDecoder
-#define vp10_reader_has_error ans_reader_has_error
-#define vp10_read uabs_read
-#define vp10_read_bit uabs_read_bit
-#define vp10_read_literal uabs_read_literal
-#define vp10_read_tree uabs_read_tree
+#include "aom/aomdx.h" // for av1_decrypt_cb
+#define aom_reader struct AnsDecoder
+#define aom_reader_has_error ans_reader_has_error
+#define aom_read uabs_read
+#define aom_read_bit uabs_read_bit
+#define aom_read_literal uabs_read_literal
+#define aom_read_tree uabs_read_tree
#else
#include "aom_dsp/bitreader.h"
-#define vp10_reader vpx_reader
-#define vp10_reader_has_error vpx_reader_has_error
-#define vp10_read vpx_read
-#define vp10_read_bit vpx_read_bit
-#define vp10_read_literal vpx_read_literal
-#define vp10_read_tree vpx_read_tree
+#define aom_reader aom_reader
+#define aom_reader_has_error aom_reader_has_error
+#define aom_read aom_read
+#define aom_read_bit aom_read_bit
+#define aom_read_literal aom_read_literal
+#define aom_read_tree aom_read_tree
#endif
-#endif // VPX10_DECODER_BITREADER_H_
+#endif // AOM10_DECODER_BITREADER_H_
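
For context (not part of the commit): the bitreader.h aliases above keep the bit reader compile-time pluggable, so decoder code written against the aom_reader/aom_read_* names builds unchanged whether CONFIG_ANS selects the ANS reader or the default bool decoder from aom_dsp/bitreader.h. A minimal caller sketch; read_two_fields is a name invented here purely for illustration:

    #include "av1/decoder/bitreader.h"

    /* Illustrative helper: reads a flag and, if set, a 2-bit literal through
     * the aliased interface.  Under CONFIG_ANS these calls resolve to
     * uabs_read_bit()/uabs_read_literal(); otherwise to the renamed
     * bool-decoder entry points. */
    static int read_two_fields(aom_reader *r) {
      const int flag = aom_read_bit(r);
      const int value = flag ? aom_read_literal(r, 2) : 0;
      return aom_reader_has_error(r) ? -1 : value;
    }
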
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 0f90c20..1f1f358 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -11,20 +11,19 @@
#include <assert.h>
#include <stdlib.h> // qsort()
-#include "./vp10_rtcd.h"
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
-#include "./vpx_config.h"
+#include "./av1_rtcd.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
#include "aom_dsp/bitreader_buffer.h"
#include "av1/decoder/bitreader.h"
-#include "aom_dsp/vpx_dsp_common.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
#if CONFIG_CLPF
@@ -51,9 +50,9 @@
#include "av1/decoder/decoder.h"
#include "av1/decoder/dsubexp.h"
-#define MAX_VPX_HEADER_SIZE 80
+#define MAX_AV1_HEADER_SIZE 80
-static int is_compound_reference_allowed(const VP10_COMMON *cm) {
+static int is_compound_reference_allowed(const AV1_COMMON *cm) {
int i;
if (frame_is_intra_only(cm)) return 0;
for (i = 1; i < INTER_REFS_PER_FRAME; ++i)
@@ -62,7 +61,7 @@
return 0;
}
-static void setup_compound_reference_mode(VP10_COMMON *cm) {
+static void setup_compound_reference_mode(AV1_COMMON *cm) {
#if CONFIG_EXT_REFS
cm->comp_fwd_ref[0] = LAST_FRAME;
cm->comp_fwd_ref[1] = LAST2_FRAME;
@@ -94,51 +93,51 @@
return len != 0 && len <= (size_t)(end - start);
}
-static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
- const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
+static int decode_unsigned_max(struct aom_read_bit_buffer *rb, int max) {
+ const int data = aom_rb_read_literal(rb, get_unsigned_bits(max));
return data > max ? max : data;
}
-static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? TX_MODE_SELECT : vpx_rb_read_literal(rb, 2);
+static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
}
-static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
- vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+ av1_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}
-static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i;
#if CONFIG_REF_MV
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->newmv_prob[i]);
+ av1_diff_update_prob(r, &fc->newmv_prob[i]);
for (i = 0; i < ZEROMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->zeromv_prob[i]);
+ av1_diff_update_prob(r, &fc->zeromv_prob[i]);
for (i = 0; i < REFMV_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->refmv_prob[i]);
+ av1_diff_update_prob(r, &fc->refmv_prob[i]);
for (i = 0; i < DRL_MODE_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->drl_prob[i]);
+ av1_diff_update_prob(r, &fc->drl_prob[i]);
#if CONFIG_EXT_INTER
- vp10_diff_update_prob(r, &fc->new2mv_prob);
+ av1_diff_update_prob(r, &fc->new2mv_prob);
#endif // CONFIG_EXT_INTER
#else
int j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
for (j = 0; j < INTER_MODES - 1; ++j)
- vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+ av1_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
#endif
}
#if CONFIG_EXT_INTER
-static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (j = 0; j < INTER_MODE_CONTEXTS; ++j) {
for (i = 0; i < INTER_COMPOUND_MODES - 1; ++i) {
- vp10_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
+ av1_diff_update_prob(r, &fc->inter_compound_mode_probs[j][i]);
}
}
}
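
Aside (not part of the commit): decode_unsigned_max() above reads just enough bits to cover [0, max] and clamps any out-of-range code. A standalone sketch of that arithmetic; bits_for() is a local stand-in re-stating the behavior assumed of the library's get_unsigned_bits():

    #include <assert.h>

    /* Number of bits needed to represent max, e.g. 4 for max = 10. */
    static int bits_for(unsigned int max) {
      int bits = 0;
      while (max >> bits) ++bits;
      return bits;
    }

    int main(void) {
      assert(bits_for(10) == 4);
      /* A 4-bit raw code of 13 exceeds max, so the decoder clamps it:
       * data > max ? max : data  ->  10. */
      return 0;
    }
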
@@ -146,28 +145,28 @@
#endif // CONFIG_EXT_INTER
static REFERENCE_MODE read_frame_reference_mode(
- const VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+ const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
if (is_compound_reference_allowed(cm)) {
- return vpx_rb_read_bit(rb)
+ return aom_rb_read_bit(rb)
? REFERENCE_MODE_SELECT
- : (vpx_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
+ : (aom_rb_read_bit(rb) ? COMPOUND_REFERENCE : SINGLE_REFERENCE);
} else {
return SINGLE_REFERENCE;
}
}
-static void read_frame_reference_mode_probs(VP10_COMMON *cm, vp10_reader *r) {
+static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
FRAME_CONTEXT *const fc = cm->fc;
int i, j;
if (cm->reference_mode == REFERENCE_MODE_SELECT)
for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
- vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);
+ av1_diff_update_prob(r, &fc->comp_inter_prob[i]);
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < REF_CONTEXTS; ++i) {
for (j = 0; j < (SINGLE_REFS - 1); ++j) {
- vp10_diff_update_prob(r, &fc->single_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->single_ref_prob[i][j]);
}
}
}
@@ -176,29 +175,29 @@
for (i = 0; i < REF_CONTEXTS; ++i) {
#if CONFIG_EXT_REFS
for (j = 0; j < (FWD_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
for (j = 0; j < (BWD_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_bwdref_prob[i][j]);
#else
for (j = 0; j < (COMP_REFS - 1); ++j)
- vp10_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
+ av1_diff_update_prob(r, &fc->comp_ref_prob[i][j]);
#endif // CONFIG_EXT_REFS
}
}
}
-static void update_mv_probs(vpx_prob *p, int n, vp10_reader *r) {
+static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
int i;
- for (i = 0; i < n; ++i) vp10_diff_update_prob(r, &p[i]);
+ for (i = 0; i < n; ++i) av1_diff_update_prob(r, &p[i]);
}
-static void read_mv_probs(nmv_context *ctx, int allow_hp, vp10_reader *r) {
+static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
int i, j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_REF_MV
- vp10_diff_update_prob(r, &ctx->zero_rmv);
+ av1_diff_update_prob(r, &ctx->zero_rmv);
#endif
for (i = 0; i < 2; ++i) {
@@ -238,16 +237,16 @@
inv_txfm_param.eob = eob;
inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
inv_txfm_param.bd = xd->bd;
highbd_inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
inv_txfm_add(dqcoeff, dst, stride, &inv_txfm_param);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
if (eob == 1) {
dqcoeff[0] = 0;
@@ -272,7 +271,7 @@
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
MB_MODE_INFO *const mbmi,
int plane, int row, int col,
@@ -287,22 +286,21 @@
if (mbmi->sb_type < BLOCK_8X8)
if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
- vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
- pd->dst.stride, dst, pd->dst.stride, col, row,
- plane);
+ av1_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+ pd->dst.stride, dst, pd->dst.stride, col, row, plane);
if (!mbmi->skip) {
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 0);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
- tx_type, r, mbmi->segment_id);
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ tx_type, r, mbmi->segment_id);
inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
eob);
}
}
#if CONFIG_VAR_TX
-static void decode_reconstruct_tx(MACROBLOCKD *const xd, vp10_reader *r,
+static void decode_reconstruct_tx(MACROBLOCKD *const xd, aom_reader *r,
MB_MODE_INFO *const mbmi, int plane,
BLOCK_SIZE plane_bsize, int block,
int blk_row, int blk_col, TX_SIZE tx_size,
@@ -330,8 +328,8 @@
TX_TYPE tx_type = get_tx_type(plane_type, xd, block, plane_tx_size);
const scan_order *sc = get_scan(plane_tx_size, tx_type, 1);
const int eob =
- vp10_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
- tx_type, r, mbmi->segment_id);
+ av1_decode_block_tokens(xd, plane, sc, blk_col, blk_row, plane_tx_size,
+ tx_type, r, mbmi->segment_id);
inverse_transform_block(
xd, plane, tx_type, plane_tx_size,
&pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col],
@@ -363,7 +361,7 @@
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif
int segment_id, int plane, int row, int col,
TX_SIZE tx_size) {
@@ -372,8 +370,8 @@
int block_idx = (row << 1) + col;
TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
const scan_order *sc = get_scan(tx_size, tx_type, 1);
- const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
- tx_type, r, segment_id);
+ const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+ tx_type, r, segment_id);
inverse_transform_block(xd, plane, tx_type, tx_size,
&pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
@@ -385,8 +383,8 @@
static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl,
int n4_hl) {
// get minimum log2 num4x4s dimension
- const int x = VPXMIN(n4_wl, n4_hl);
- return VPXMIN(txsize_sqr_map[mbmi->tx_size], x);
+ const int x = AOMMIN(n4_wl, n4_hl);
+ return AOMMIN(txsize_sqr_map[mbmi->tx_size], x);
}
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
@@ -409,7 +407,7 @@
}
}
-static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static MB_MODE_INFO *set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int bw, int bh, int x_mis, int y_mis, int bwl,
int bhl) {
@@ -439,12 +437,12 @@
// as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
return &xd->mi[0]->mbmi;
}
#if CONFIG_SUPERTX
-static MB_MODE_INFO *set_offsets_extend(VP10_COMMON *const cm,
+static MB_MODE_INFO *set_offsets_extend(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
const TileInfo *const tile,
BLOCK_SIZE bsize_pred, int mi_row_pred,
@@ -471,10 +469,9 @@
return &xd->mi[0]->mbmi;
}
-static MB_MODE_INFO *set_mb_offsets(VP10_COMMON *const cm,
- MACROBLOCKD *const xd, BLOCK_SIZE bsize,
- int mi_row, int mi_col, int bw, int bh,
- int x_mis, int y_mis) {
+static MB_MODE_INFO *set_mb_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ BLOCK_SIZE bsize, int mi_row, int mi_col,
+ int bw, int bh, int x_mis, int y_mis) {
const int offset = mi_row * cm->mi_stride + mi_col;
const TileInfo *const tile = &xd->tile;
int x, y;
@@ -489,7 +486,7 @@
return &xd->mi[0]->mbmi;
}
-static void set_offsets_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_offsets_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const TileInfo *const tile, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
@@ -505,16 +502,16 @@
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
}
-static void set_param_topblock(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int txfm, int skip) {
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int bh = num_8x8_blocks_high_lookup[bsize];
- const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
@@ -534,21 +531,21 @@
#endif
}
-static void set_ref(VP10_COMMON *const cm, MACROBLOCKD *const xd, int idx,
+static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx,
int mi_row, int mi_col) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
xd->block_refs[idx] = ref_buffer;
- if (!vp10_is_valid_scale(&ref_buffer->sf))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_is_valid_scale(&ref_buffer->sf))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid scale factors");
- vp10_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
- &ref_buffer->sf);
+ av1_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
+ &ref_buffer->sf);
xd->corrupted |= ref_buffer->buf->corrupted;
}
static void dec_predict_b_extend(
- VP10Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
+ AV1Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile,
int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred,
int mi_row_top, int mi_col_top, uint8_t *dst_buf[3], int dst_stride[3],
BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) {
@@ -564,7 +561,7 @@
const int mi_width_top = num_8x8_blocks_wide_lookup[bsize_top];
const int mi_height_top = num_8x8_blocks_high_lookup[bsize_top];
MB_MODE_INFO *mbmi;
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top ||
mi_row_pred >= mi_row_top + mi_height_top ||
@@ -596,21 +593,21 @@
(c >> xd->plane[2].subsampling_x);
if (!b_sub8x8)
- vp10_build_inter_predictors_sb_extend(xd,
+ av1_build_inter_predictors_sb_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, bsize_pred);
+ mi_row_pred, mi_col_pred, bsize_pred);
else
- vp10_build_inter_predictors_sb_sub8x8_extend(xd,
+ av1_build_inter_predictors_sb_sub8x8_extend(xd,
#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
+ mi_row_ori, mi_col_ori,
#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred,
- bsize_pred, block);
+ mi_row_pred, mi_col_pred,
+ bsize_pred, block);
}
-static void dec_extend_dir(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
@@ -678,7 +675,7 @@
}
}
-static void dec_extend_all(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void dec_extend_all(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int block,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
@@ -701,13 +698,12 @@
mi_row_top, mi_col_top, dst_buf, dst_stride, 7);
}
-static void dec_predict_sb_complex(VP10Decoder *const pbi,
- MACROBLOCKD *const xd,
+static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile, int mi_row,
int mi_col, int mi_row_top, int mi_col_top,
BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
uint8_t *dst_buf[3], int dst_stride[3]) {
- const VP10_COMMON *const cm = &pbi->common;
+ const AV1_COMMON *const cm = &pbi->common;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize);
const BLOCK_SIZE subsize = get_subsize(bsize, partition);
@@ -725,7 +721,7 @@
int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE };
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -748,7 +744,7 @@
dst_buf3[0] = tmp_buf3;
dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE;
dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
#endif
@@ -793,7 +789,7 @@
// weighted average to smooth the boundary
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
0);
@@ -827,7 +823,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -856,7 +852,7 @@
// Smooth
xd->plane[0].dst.buf = dst_buf[0];
xd->plane[0].dst.stride = dst_stride[0];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
0);
@@ -890,7 +886,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -943,22 +939,22 @@
if (bsize == BLOCK_8X8 && i != 0)
continue; // Skip <4x4 chroma smoothing
if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
if (mi_row + hbs < cm->mi_rows) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
}
} else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -993,13 +989,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -1034,13 +1030,13 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
}
for (i = 0; i < MAX_MB_PLANE; i++) {
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
@@ -1073,7 +1069,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_VERT, i);
@@ -1081,7 +1077,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ,
i);
@@ -1114,7 +1110,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf1[i];
xd->plane[i].dst.stride = dst_stride1[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i],
mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize,
PARTITION_HORZ, i);
@@ -1122,7 +1118,7 @@
for (i = 0; i < MAX_MB_PLANE; i++) {
xd->plane[i].dst.buf = dst_buf[i];
xd->plane[i].dst.stride = dst_stride[i];
- vp10_build_masked_inter_predictor_complex(
+ av1_build_masked_inter_predictor_complex(
xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row,
mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT,
i);
@@ -1133,14 +1129,13 @@
}
}
-static void set_segment_id_supertx(const VP10_COMMON *const cm,
- const int mi_row, const int mi_col,
- const BLOCK_SIZE bsize) {
+static void set_segment_id_supertx(const AV1_COMMON *const cm, const int mi_row,
+ const int mi_col, const BLOCK_SIZE bsize) {
const struct segmentation *seg = &cm->seg;
const int miw =
- VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
+ AOMMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col);
const int mih =
- VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
+ AOMMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row);
const int mi_offset = mi_row * cm->mi_stride + mi_col;
MODE_INFO **const mip = cm->mi_grid_visible + mi_offset;
int r, c;
@@ -1153,7 +1148,7 @@
for (r = 0; r < mih; r++)
for (c = 0; c < miw; c++)
seg_id_supertx =
- VPXMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
+ AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx);
assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS);
}
@@ -1164,21 +1159,21 @@
}
#endif // CONFIG_SUPERTX
-static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif // CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize, int bwl, int bhl) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
const int bw = 1 << (bwl - 1);
const int bh = 1 << (bhl - 1);
- const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
- const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
+ const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row);
#if CONFIG_SUPERTX
MB_MODE_INFO *mbmi;
@@ -1191,22 +1186,21 @@
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
- vp10_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis,
- y_mis);
+ av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis);
#else
MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis,
y_mis, bwl, bhl);
#if CONFIG_EXT_PARTITION_TYPES
xd->mi[0]->mbmi.partition = partition;
#endif
- vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+ av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
#endif // CONFIG_SUPERTX
if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
const BLOCK_SIZE uv_subsize =
ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
if (uv_subsize == BLOCK_INVALID)
- vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
"Invalid block size.");
}
@@ -1214,7 +1208,7 @@
mbmi->segment_id_supertx = MAX_SEGMENTS;
if (supertx_enabled) {
- xd->corrupted |= vp10_reader_has_error(r);
+ xd->corrupted |= aom_reader_has_error(r);
return;
}
#endif // CONFIG_SUPERTX
@@ -1226,7 +1220,7 @@
int plane;
for (plane = 0; plane <= 1; ++plane) {
if (mbmi->palette_mode_info.palette_size[plane])
- vp10_decode_palette_tokens(xd, plane, r);
+ av1_decode_palette_tokens(xd, plane, r);
}
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -1254,17 +1248,16 @@
}
} else {
// Prediction
- vp10_build_inter_predictors_sb(xd, mi_row, mi_col,
- VPXMAX(bsize, BLOCK_8X8));
+ av1_build_inter_predictors_sb(xd, mi_row, mi_col, AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_OBMC
if (mbmi->motion_variation == OBMC_CAUSAL) {
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
@@ -1274,7 +1267,7 @@
int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
assert(mbmi->sb_type >= BLOCK_8X8);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
int len = sizeof(uint16_t);
dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
@@ -1284,25 +1277,23 @@
dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
} else {
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
dst_buf1[0] = tmp_buf1;
dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
dst_buf2[0] = tmp_buf2;
dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
}
-#endif // CONFIG_VP9_HIGHBITDEPTH
- vp10_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
- dst_width1, dst_height1,
- dst_stride1);
- vp10_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
- dst_width2, dst_height2, dst_stride2);
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row,
- mi_col);
- vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
- dst_stride1, dst_buf2, dst_stride2);
+#endif // CONFIG_AOM_HIGHBITDEPTH
+ av1_build_prediction_by_above_preds(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_width1, dst_height1, dst_stride1);
+ av1_build_prediction_by_left_preds(cm, xd, mi_row, mi_col, dst_buf2,
+ dst_width2, dst_height2, dst_stride2);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, dst_buf1,
+ dst_stride1, dst_buf2, dst_stride2);
}
#endif // CONFIG_OBMC
@@ -1319,7 +1310,7 @@
#if CONFIG_VAR_TX
// TODO(jingning): This can be simplified for decoder performance.
const BLOCK_SIZE plane_bsize =
- get_plane_block_size(VPXMAX(bsize, BLOCK_8X8), pd);
+ get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
int bw = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
int bh = num_4x4_blocks_high_txsize_lookup[max_tx_size];
@@ -1385,7 +1376,7 @@
}
}
- xd->corrupted |= vp10_reader_has_error(r);
+ xd->corrupted |= aom_reader_has_error(r);
}
static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, int mi_row,
@@ -1416,31 +1407,31 @@
}
#endif // !CONFIG_EXT_PARTITION_TYPES
-static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col, vp10_reader *r,
+static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int mi_row, int mi_col, aom_reader *r,
int has_rows, int has_cols,
#if CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize,
#endif
int bsl) {
const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
- const vpx_prob *const probs = cm->fc->partition_prob[ctx];
+ const aom_prob *const probs = cm->fc->partition_prob[ctx];
FRAME_COUNTS *counts = xd->counts;
PARTITION_TYPE p;
if (has_rows && has_cols)
#if CONFIG_EXT_PARTITION_TYPES
if (bsize <= BLOCK_8X8)
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
else
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_ext_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_ext_partition_tree, probs);
#else
- p = (PARTITION_TYPE)vp10_read_tree(r, vp10_partition_tree, probs);
+ p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
#endif // CONFIG_EXT_PARTITION_TYPES
else if (!has_rows && has_cols)
- p = vp10_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
+ p = aom_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
else if (has_rows && !has_cols)
- p = vp10_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
+ p = aom_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
else
p = PARTITION_SPLIT;
@@ -1450,13 +1441,13 @@
}
#if CONFIG_SUPERTX
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
- vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
- const int ctx = vp10_get_skip_context(xd);
- const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+ const int ctx = av1_get_skip_context(xd);
+ const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
@@ -1465,13 +1456,13 @@
#endif // CONFIG_SUPERTX
// TODO(slavarnway): eliminate bsize and subsize in future commits
-static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
BLOCK_SIZE bsize, int n4x4_l2) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int n8x8_l2 = n4x4_l2 - 1;
const int num_8x8_wh = 1 << n8x8_l2;
const int hbs = num_8x8_wh >> 1;
@@ -1503,7 +1494,7 @@
bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) {
const int supertx_context = partition_supertx_context_lookup[partition];
supertx_enabled =
- vp10_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
+ aom_read(r, cm->fc->supertx_prob[supertx_context][supertx_size]);
if (xd->counts)
xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++;
#if CONFIG_VAR_TX
@@ -1704,21 +1695,21 @@
if (get_ext_tx_types(supertx_size, bsize, 1) > 1) {
int eset = get_ext_tx_set(supertx_size, bsize, 1);
if (eset > 0) {
- txfm = vp10_read_tree(r, vp10_ext_tx_inter_tree[eset],
- cm->fc->inter_ext_tx_prob[eset][supertx_size]);
+ txfm = aom_read_tree(r, av1_ext_tx_inter_tree[eset],
+ cm->fc->inter_ext_tx_prob[eset][supertx_size]);
if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
}
}
#else
if (supertx_size < TX_32X32) {
- txfm = vp10_read_tree(r, vp10_ext_tx_tree,
- cm->fc->inter_ext_tx_prob[supertx_size]);
+ txfm = aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[supertx_size]);
if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
}
#endif // CONFIG_EXT_TX
}
- vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+ av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
for (i = 0; i < MAX_MB_PLANE; i++) {
dst_buf[i] = xd->plane[i].dst.buf;
dst_stride[i] = xd->plane[i].dst.stride;
@@ -1800,7 +1791,7 @@
if (bsize == BLOCK_64X64) {
if (cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
- vpx_read_literal(r, DERING_REFINEMENT_BITS);
+ aom_read_literal(r, DERING_REFINEMENT_BITS);
} else {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
0;
@@ -1813,26 +1804,26 @@
#if !CONFIG_ANS
static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
- struct vpx_internal_error_info *error_info,
- vp10_reader *r, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ aom_reader *r, aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
// Validate the calculated partition length. If the buffer
// described by the partition can't be fully read, then restrict
// it to the portion that can be (for EC mode) or throw an error.
if (!read_is_valid(data, read_size, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
- if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
- vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+ if (aom_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
+ aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
#else
static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
const size_t read_size,
- struct vpx_internal_error_info *error_info,
+ struct aom_internal_error_info *error_info,
struct AnsDecoder *const ans,
- vpx_decrypt_cb decrypt_cb,
+ aom_decrypt_cb decrypt_cb,
void *decrypt_state) {
(void)decrypt_cb;
(void)decrypt_state;
@@ -1840,104 +1831,103 @@
// described by the partition can't be fully read, then restrict
// it to the portion that can be (for EC mode) or throw an error.
if (!read_is_valid(data, read_size, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (read_size > INT_MAX || ans_read_init(ans, data, (int)read_size))
- vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(error_info, AOM_CODEC_MEM_ERROR,
"Failed to allocate token decoder %d", 1);
}
#endif
-static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
- vp10_reader *r) {
+static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
+ aom_reader *r) {
int i, j, k, l, m;
- if (vp10_read_bit(r))
+ if (aom_read_bit(r))
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
- vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+ av1_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
-static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
- vp10_reader *r) {
+static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
read_coef_probs_common(fc->coef_probs[tx_size], r);
#if CONFIG_ANS
- vp10_coef_pareto_cdfs(fc);
+ av1_coef_pareto_cdfs(fc);
#endif // CONFIG_ANS
}
-static void setup_segmentation(VP10_COMMON *const cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_segmentation(AV1_COMMON *const cm,
+ struct aom_read_bit_buffer *rb) {
struct segmentation *const seg = &cm->seg;
int i, j;
seg->update_map = 0;
seg->update_data = 0;
- seg->enabled = vpx_rb_read_bit(rb);
+ seg->enabled = aom_rb_read_bit(rb);
if (!seg->enabled) return;
// Segmentation map update
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
seg->update_map = 1;
} else {
- seg->update_map = vpx_rb_read_bit(rb);
+ seg->update_map = aom_rb_read_bit(rb);
}
if (seg->update_map) {
if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
seg->temporal_update = 0;
} else {
- seg->temporal_update = vpx_rb_read_bit(rb);
+ seg->temporal_update = aom_rb_read_bit(rb);
}
}
// Segmentation data update
- seg->update_data = vpx_rb_read_bit(rb);
+ seg->update_data = aom_rb_read_bit(rb);
if (seg->update_data) {
- seg->abs_delta = vpx_rb_read_bit(rb);
+ seg->abs_delta = aom_rb_read_bit(rb);
- vp10_clearall_segfeatures(seg);
+ av1_clearall_segfeatures(seg);
for (i = 0; i < MAX_SEGMENTS; i++) {
for (j = 0; j < SEG_LVL_MAX; j++) {
int data = 0;
- const int feature_enabled = vpx_rb_read_bit(rb);
+ const int feature_enabled = aom_rb_read_bit(rb);
if (feature_enabled) {
- vp10_enable_segfeature(seg, i, j);
- data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
- if (vp10_is_segfeature_signed(j))
- data = vpx_rb_read_bit(rb) ? -data : data;
+ av1_enable_segfeature(seg, i, j);
+ data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
+ if (av1_is_segfeature_signed(j))
+ data = aom_rb_read_bit(rb) ? -data : data;
}
- vp10_set_segdata(seg, i, j, data);
+ av1_set_segdata(seg, i, j, data);
}
}
}
}
#if CONFIG_LOOP_RESTORATION
-static void setup_restoration(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_restoration(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int i;
RestorationInfo *rsi = &cm->rst_info;
int ntiles;
- if (vpx_rb_read_bit(rb)) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
rsi->restoration_type = RESTORE_BILATERAL;
- ntiles = vp10_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width,
- cm->height);
- rsi->bilateral_level = (int *)vpx_realloc(
+ ntiles =
+ av1_get_restoration_ntiles(BILATERAL_TILESIZE, cm->width, cm->height);
+ rsi->bilateral_level = (int *)aom_realloc(
rsi->bilateral_level, sizeof(*rsi->bilateral_level) * ntiles);
assert(rsi->bilateral_level != NULL);
for (i = 0; i < ntiles; ++i) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
rsi->bilateral_level[i] =
- vpx_rb_read_literal(rb, vp10_bilateral_level_bits(cm));
+ aom_rb_read_literal(rb, av1_bilateral_level_bits(cm));
} else {
rsi->bilateral_level[i] = -1;
}
@@ -1945,30 +1935,30 @@
} else {
rsi->restoration_type = RESTORE_WIENER;
ntiles =
- vp10_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
- rsi->wiener_level = (int *)vpx_realloc(
+ av1_get_restoration_ntiles(WIENER_TILESIZE, cm->width, cm->height);
+ rsi->wiener_level = (int *)aom_realloc(
rsi->wiener_level, sizeof(*rsi->wiener_level) * ntiles);
assert(rsi->wiener_level != NULL);
- rsi->vfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ rsi->vfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
rsi->vfilter, sizeof(*rsi->vfilter) * ntiles);
assert(rsi->vfilter != NULL);
- rsi->hfilter = (int(*)[RESTORATION_HALFWIN])vpx_realloc(
+ rsi->hfilter = (int(*)[RESTORATION_HALFWIN])aom_realloc(
rsi->hfilter, sizeof(*rsi->hfilter) * ntiles);
assert(rsi->hfilter != NULL);
for (i = 0; i < ntiles; ++i) {
- rsi->wiener_level[i] = vpx_rb_read_bit(rb);
+ rsi->wiener_level[i] = aom_rb_read_bit(rb);
if (rsi->wiener_level[i]) {
- rsi->vfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+ rsi->vfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
WIENER_FILT_TAP0_MINV;
- rsi->vfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+ rsi->vfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
WIENER_FILT_TAP1_MINV;
- rsi->vfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+ rsi->vfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
WIENER_FILT_TAP2_MINV;
- rsi->hfilter[i][0] = vpx_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
+ rsi->hfilter[i][0] = aom_rb_read_literal(rb, WIENER_FILT_TAP0_BITS) +
WIENER_FILT_TAP0_MINV;
- rsi->hfilter[i][1] = vpx_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
+ rsi->hfilter[i][1] = aom_rb_read_literal(rb, WIENER_FILT_TAP1_BITS) +
WIENER_FILT_TAP1_MINV;
- rsi->hfilter[i][2] = vpx_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
+ rsi->hfilter[i][2] = aom_rb_read_literal(rb, WIENER_FILT_TAP2_BITS) +
WIENER_FILT_TAP2_MINV;
} else {
rsi->vfilter[i][0] = rsi->vfilter[i][1] = rsi->vfilter[i][2] = 0;
@@ -1982,60 +1972,60 @@
}
#endif // CONFIG_LOOP_RESTORATION
-static void setup_loopfilter(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
struct loopfilter *lf = &cm->lf;
- lf->filter_level = vpx_rb_read_literal(rb, 6);
- lf->sharpness_level = vpx_rb_read_literal(rb, 3);
+ lf->filter_level = aom_rb_read_literal(rb, 6);
+ lf->sharpness_level = aom_rb_read_literal(rb, 3);
// Read in loop filter deltas applied at the MB level based on mode or ref
// frame.
lf->mode_ref_delta_update = 0;
- lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
+ lf->mode_ref_delta_enabled = aom_rb_read_bit(rb);
if (lf->mode_ref_delta_enabled) {
- lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
+ lf->mode_ref_delta_update = aom_rb_read_bit(rb);
if (lf->mode_ref_delta_update) {
int i;
for (i = 0; i < TOTAL_REFS_PER_FRAME; i++)
- if (vpx_rb_read_bit(rb))
- lf->ref_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+ if (aom_rb_read_bit(rb))
+ lf->ref_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
- if (vpx_rb_read_bit(rb))
- lf->mode_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);
+ if (aom_rb_read_bit(rb))
+ lf->mode_deltas[i] = aom_rb_read_inv_signed_literal(rb, 6);
}
}
}
#if CONFIG_CLPF
-static void setup_clpf(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
- cm->clpf = vpx_rb_read_literal(rb, 1);
+static void setup_clpf(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ cm->clpf = aom_rb_read_literal(rb, 1);
}
#endif
#if CONFIG_DERING
-static void setup_dering(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
- cm->dering_level = vpx_rb_read_literal(rb, DERING_LEVEL_BITS);
+static void setup_dering(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ cm->dering_level = aom_rb_read_literal(rb, DERING_LEVEL_BITS);
}
#endif // CONFIG_DERING
-static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? vpx_rb_read_inv_signed_literal(rb, 6) : 0;
+static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? aom_rb_read_inv_signed_literal(rb, 6) : 0;
}
-static void setup_quantization(VP10_COMMON *const cm,
- struct vpx_read_bit_buffer *rb) {
- cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
+static void setup_quantization(AV1_COMMON *const cm,
+ struct aom_read_bit_buffer *rb) {
+ cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
cm->y_dc_delta_q = read_delta_q(rb);
cm->uv_dc_delta_q = read_delta_q(rb);
cm->uv_ac_delta_q = read_delta_q(rb);
cm->dequant_bit_depth = cm->bit_depth;
#if CONFIG_AOM_QM
- cm->using_qmatrix = vpx_rb_read_bit(rb);
+ cm->using_qmatrix = aom_rb_read_bit(rb);
if (cm->using_qmatrix) {
- cm->min_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
- cm->max_qmlevel = vpx_rb_read_literal(rb, QM_LEVEL_BITS);
+ cm->min_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
+ cm->max_qmlevel = aom_rb_read_literal(rb, QM_LEVEL_BITS);
} else {
cm->min_qmlevel = 0;
cm->max_qmlevel = 0;
@@ -2043,7 +2033,7 @@
#endif
}
-static void setup_segmentation_dequant(VP10_COMMON *const cm) {
+static void setup_segmentation_dequant(AV1_COMMON *const cm) {
// Build y/uv dequant values based on segmentation.
int i = 0;
#if CONFIG_AOM_QM
@@ -2060,14 +2050,14 @@
#endif // CONFIG_NEW_QUANT
if (cm->seg.enabled) {
for (i = 0; i < MAX_SEGMENTS; ++i) {
- const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
+ const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
cm->y_dequant[i][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[i][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[i][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2086,10 +2076,10 @@
#if CONFIG_NEW_QUANT
for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
- cm->y_dequant_nuq[i][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
- cm->uv_dequant_nuq[i][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], qindex, b,
+ cm->y_dequant_nuq[i][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], qindex, b,
+ cm->uv_dequant_nuq[i][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
@@ -2098,13 +2088,12 @@
const int qindex = cm->base_qindex;
// When segmentation is disabled, only the first value is used. The
// remaining are don't cares.
- cm->y_dequant[0][0] =
- vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
- cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+ cm->y_dequant[0][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+ cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
cm->uv_dequant[0][0] =
- vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+ av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
cm->uv_dequant[0][1] =
- vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
#if CONFIG_AOM_QM
lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
@@ -2122,41 +2111,41 @@
#if CONFIG_NEW_QUANT
for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (b = 0; b < COEF_BANDS; ++b) {
- vp10_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
- cm->y_dequant_nuq[0][dq][b], NULL, dq);
- vp10_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
- cm->uv_dequant_nuq[0][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->y_dequant[0][b != 0], qindex, b,
+ cm->y_dequant_nuq[0][dq][b], NULL, dq);
+ av1_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], qindex, b,
+ cm->uv_dequant_nuq[0][dq][b], NULL, dq);
}
}
#endif // CONFIG_NEW_QUANT
}
}
-static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
- return vpx_rb_read_bit(rb) ? SWITCHABLE
- : vpx_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
+static INTERP_FILTER read_interp_filter(struct aom_read_bit_buffer *rb) {
+ return aom_rb_read_bit(rb) ? SWITCHABLE
+ : aom_rb_read_literal(rb, 2 + CONFIG_EXT_INTERP);
}
-static void setup_render_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
cm->render_width = cm->width;
cm->render_height = cm->height;
- if (vpx_rb_read_bit(rb))
- vp10_read_frame_size(rb, &cm->render_width, &cm->render_height);
+ if (aom_rb_read_bit(rb))
+ av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
}
-static void resize_mv_buffer(VP10_COMMON *cm) {
- vpx_free(cm->cur_frame->mvs);
+static void resize_mv_buffer(AV1_COMMON *cm) {
+ aom_free(cm->cur_frame->mvs);
cm->cur_frame->mi_rows = cm->mi_rows;
cm->cur_frame->mi_cols = cm->mi_cols;
CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
- (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
sizeof(*cm->cur_frame->mvs)));
}
-static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
+static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Dimensions of %dx%d beyond allowed size of %dx%d.",
width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
@@ -2166,16 +2155,16 @@
const int new_mi_cols =
ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
- // Allocations in vp10_alloc_context_buffers() depend on individual
+ // Allocations in av1_alloc_context_buffers() depend on individual
// dimensions as well as the overall size.
if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
- if (vp10_alloc_context_buffers(cm, width, height))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ if (av1_alloc_context_buffers(cm, width, height))
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate context buffers");
} else {
- vp10_set_mb_mi(cm, width, height);
+ av1_set_mb_mi(cm, width, height);
}
- vp10_init_context_buffers(cm);
+ av1_init_context_buffers(cm);
cm->width = width;
cm->height = height;
}
@@ -2185,25 +2174,25 @@
}
}
-static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int width, height;
BufferPool *const pool = cm->buffer_pool;
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
resize_context_buffers(cm, width, height);
setup_render_size(cm, rb);
lock_buffer_pool(pool);
- if (vpx_realloc_frame_buffer(
+ if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
@@ -2217,22 +2206,22 @@
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
-static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
+static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
int ref_xss, int ref_yss,
- vpx_bit_depth_t this_bit_depth,
+ aom_bit_depth_t this_bit_depth,
int this_xss, int this_yss) {
return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
ref_yss == this_yss;
}
-static void setup_frame_size_with_refs(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void setup_frame_size_with_refs(AV1_COMMON *cm,
+ struct aom_read_bit_buffer *rb) {
int width, height;
int found = 0, i;
int has_valid_ref_frame = 0;
BufferPool *const pool = cm->buffer_pool;
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
- if (vpx_rb_read_bit(rb)) {
+ if (aom_rb_read_bit(rb)) {
YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
width = buf->y_crop_width;
height = buf->y_crop_height;
@@ -2244,12 +2233,12 @@
}
if (!found) {
- vp10_read_frame_size(rb, &width, &height);
+ av1_read_frame_size(rb, &width, &height);
setup_render_size(cm, rb);
}
if (width <= 0 || height <= 0)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid frame size");
// Check to make sure at least one of frames that this frame references
@@ -2261,7 +2250,7 @@
ref_frame->buf->y_crop_height, width, height);
}
if (!has_valid_ref_frame)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Referenced frame has invalid size");
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -2269,24 +2258,24 @@
ref_frame->buf->subsampling_x,
ref_frame->buf->subsampling_y, cm->bit_depth,
cm->subsampling_x, cm->subsampling_y))
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Referenced frame has incompatible color format");
}
resize_context_buffers(cm, width, height);
lock_buffer_pool(pool);
- if (vpx_realloc_frame_buffer(
+ if (aom_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
- VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
+ AOM_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
@@ -2300,27 +2289,27 @@
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
-static void read_tile_info(VP10Decoder *const pbi,
- struct vpx_read_bit_buffer *const rb) {
- VP10_COMMON *const cm = &pbi->common;
+static void read_tile_info(AV1Decoder *const pbi,
+ struct aom_read_bit_buffer *const rb) {
+ AV1_COMMON *const cm = &pbi->common;
#if CONFIG_EXT_TILE
// Read the tile width/height
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128) {
- cm->tile_width = vpx_rb_read_literal(rb, 5) + 1;
- cm->tile_height = vpx_rb_read_literal(rb, 5) + 1;
+ cm->tile_width = aom_rb_read_literal(rb, 5) + 1;
+ cm->tile_height = aom_rb_read_literal(rb, 5) + 1;
} else
#endif // CONFIG_EXT_PARTITION
{
- cm->tile_width = vpx_rb_read_literal(rb, 6) + 1;
- cm->tile_height = vpx_rb_read_literal(rb, 6) + 1;
+ cm->tile_width = aom_rb_read_literal(rb, 6) + 1;
+ cm->tile_height = aom_rb_read_literal(rb, 6) + 1;
}
cm->tile_width <<= cm->mib_size_log2;
cm->tile_height <<= cm->mib_size_log2;
- cm->tile_width = VPXMIN(cm->tile_width, cm->mi_cols);
- cm->tile_height = VPXMIN(cm->tile_height, cm->mi_rows);
+ cm->tile_width = AOMMIN(cm->tile_width, cm->mi_cols);
+ cm->tile_height = AOMMIN(cm->tile_height, cm->mi_rows);
// Get the number of tiles
cm->tile_cols = 1;
@@ -2331,25 +2320,25 @@
if (cm->tile_cols * cm->tile_rows > 1) {
// Read the number of bytes used to store tile size
- pbi->tile_col_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
- pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+ pbi->tile_col_size_bytes = aom_rb_read_literal(rb, 2) + 1;
+ pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
}
#else
int min_log2_tile_cols, max_log2_tile_cols, max_ones;
- vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
// columns
max_ones = max_log2_tile_cols - min_log2_tile_cols;
cm->log2_tile_cols = min_log2_tile_cols;
- while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++;
+ while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
if (cm->log2_tile_cols > 6)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid number of tile columns");
// rows
- cm->log2_tile_rows = vpx_rb_read_bit(rb);
- if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb);
+ cm->log2_tile_rows = aom_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
cm->tile_cols = 1 << cm->log2_tile_cols;
cm->tile_rows = 1 << cm->log2_tile_rows;
@@ -2365,7 +2354,7 @@
// tile size magnitude
if (cm->tile_rows > 1 || cm->tile_cols > 1) {
- pbi->tile_size_bytes = vpx_rb_read_literal(rb, 2) + 1;
+ pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
}
#endif // CONFIG_EXT_TILE
}
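
Note on the EXT_TILE sizing read above: the 5- and 6-bit tile-size literals are stored minus one, scaled from superblock units to mode-info (MI) units via mib_size_log2, then clamped to the frame with AOMMIN. A minimal standalone sketch of that arithmetic (the mib_size_log2 and mi_cols values below are illustrative assumptions, not taken from this change):

#include <assert.h>
#include <stdio.h>

#define AOMMIN_SKETCH(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  /* Assumed for illustration: 64x64 superblocks are 8 MI units wide and
   * the frame is 100 MI units across. */
  const int mib_size_log2 = 3;
  const int mi_cols = 100;

  int tile_width = 63 + 1;                         /* 6-bit literal, coded minus one */
  tile_width <<= mib_size_log2;                    /* superblocks -> MI units: 512 */
  tile_width = AOMMIN_SKETCH(tile_width, mi_cols); /* clamp to the frame: 100 */

  printf("tile_width = %d MI units\n", tile_width);
  assert(tile_width == 100);
  return 0;
}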
@@ -2384,8 +2373,8 @@
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
- struct vpx_internal_error_info *error_info,
- const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ const uint8_t **data, aom_decrypt_cb decrypt_cb,
void *decrypt_state,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
int tile_size_bytes, int col, int row) {
@@ -2395,7 +2384,7 @@
const uint8_t *copy_data = NULL;
if (!read_is_valid(*data, tile_size_bytes, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (decrypt_cb) {
uint8_t be_data[4];
@@ -2421,7 +2410,7 @@
*data += tile_size_bytes;
if (size > (size_t)(data_end - *data))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile size");
if (size > 0) {
@@ -2438,9 +2427,9 @@
}
static void get_tile_buffers(
- VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+ AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
const int have_tiles = tile_cols * tile_rows > 1;
@@ -2459,11 +2448,11 @@
const uint8_t *tile_col_data_end[MAX_TILE_COLS];
const uint8_t *const data_start = data;
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? tile_rows_start + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2529,14 +2518,14 @@
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
const int tile_size_bytes, int is_last,
- struct vpx_internal_error_info *error_info,
- const uint8_t **data, vpx_decrypt_cb decrypt_cb,
+ struct aom_internal_error_info *error_info,
+ const uint8_t **data, aom_decrypt_cb decrypt_cb,
void *decrypt_state, TileBufferDec *const buf) {
size_t size;
if (!is_last) {
if (!read_is_valid(*data, 4, data_end))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile length");
if (decrypt_cb) {
@@ -2549,7 +2538,7 @@
*data += tile_size_bytes;
if (size > (size_t)(data_end - *data))
- vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(error_info, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt tile size");
} else {
size = data_end - *data;
@@ -2562,9 +2551,9 @@
}
static void get_tile_buffers(
- VP10Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
+ AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
int r, c;
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
@@ -2581,20 +2570,20 @@
}
#endif // CONFIG_EXT_TILE
-static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ AV1_COMMON *const cm = &pbi->common;
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
const int n_tiles = tile_cols * tile_rows;
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
#if CONFIG_EXT_TILE
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
@@ -2617,10 +2606,10 @@
if (cm->lf.filter_level && !cm->skip_loop_filter &&
pbi->lf_worker.data1 == NULL) {
CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
- vpx_memalign(32, sizeof(LFWorkerData)));
- pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
+ aom_memalign(32, sizeof(LFWorkerData)));
+ pbi->lf_worker.hook = (AVxWorkerHook)av1_loop_filter_worker;
if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Loop filter thread creation failed");
}
}
@@ -2629,8 +2618,8 @@
LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
// Be sure to sync as we might be resuming after a failed frame decode.
winterface->sync(&pbi->lf_worker);
- vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
- pbi->mb.plane);
+ av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+ pbi->mb.plane);
}
assert(tile_rows <= MAX_TILE_ROWS);
@@ -2639,9 +2628,9 @@
get_tile_buffers(pbi, data, data_end, tile_buffers);
if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
- vpx_free(pbi->tile_data);
+ aom_free(pbi->tile_data);
CHECK_MEM_ERROR(cm, pbi->tile_data,
- vpx_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
+ aom_memalign(32, n_tiles * (sizeof(*pbi->tile_data))));
pbi->allocated_tiles = n_tiles;
}
@@ -2658,8 +2647,8 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &cm->counts
: NULL;
- vp10_zero(td->dqcoeff);
- vp10_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
+ av1_zero(td->dqcoeff);
+ av1_tile_init(&td->xd.tile, td->cm, tile_row, tile_col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
&td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
@@ -2667,7 +2656,7 @@
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&td->bit_reader, pbi->decrypt_cb, pbi->decrypt_state);
#endif
- vp10_init_macroblockd(cm, &td->xd, td->dqcoeff);
+ av1_init_macroblockd(cm, &td->xd, td->dqcoeff);
td->xd.plane[0].color_index_map = td->color_index_map[0];
td->xd.plane[1].color_index_map = td->color_index_map[1];
}
@@ -2678,21 +2667,21 @@
int mi_row = 0;
TileInfo tile_info;
- vp10_tile_set_row(&tile_info, cm, row);
+ av1_tile_set_row(&tile_info, cm, row);
for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
TileData *const td = pbi->tile_data + tile_cols * row + col;
- vp10_tile_set_col(&tile_info, cm, col);
+ av1_tile_set_col(&tile_info, cm, col);
- vp10_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
+ av1_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
mi_row += cm->mib_size) {
int mi_col;
- vp10_zero_left_context(&td->xd);
+ av1_zero_left_context(&td->xd);
for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
mi_col += cm->mib_size) {
@@ -2705,18 +2694,18 @@
}
pbi->mb.corrupted |= td->xd.corrupted;
if (pbi->mb.corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Failed to decode tile data");
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
if ((mi_row + MI_SIZE) %
(MI_SIZE *
- VPXMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
+ AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1)) ==
0 &&
mi_row + MI_SIZE < cm->mi_rows &&
cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) {
- vp10_partial_adapt_probs(cm, mi_row, mi_col);
+ av1_partial_adapt_probs(cm, mi_row, mi_col);
++cm->coef_probs_update_idx;
}
}
@@ -2730,7 +2719,7 @@
// Loopfilter one tile row.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
- const int lf_start = VPXMAX(0, tile_info.mi_row_start - cm->mib_size);
+ const int lf_start = AOMMAX(0, tile_info.mi_row_start - cm->mib_size);
const int lf_end = tile_info.mi_row_end - cm->mib_size;
// Delay the loopfilter if the first tile row is only
@@ -2753,14 +2742,14 @@
// After loopfiltering, the last 7 row pixels in each superblock row may
// still be changed by the longest loopfilter of the next superblock row.
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
+ av1_frameworker_broadcast(pbi->cur_buf, mi_row << cm->mib_size_log2);
#endif // !CONFIG_VAR_TX
}
#if CONFIG_VAR_TX
// Loopfilter the whole frame.
- vp10_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level, 0, 0);
+ av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+ cm->lf.filter_level, 0, 0);
#else
// Loopfilter remaining rows in the frame.
if (cm->lf.filter_level && !cm->skip_loop_filter) {
@@ -2773,16 +2762,16 @@
#endif // CONFIG_VAR_TX
#if CONFIG_CLPF
if (cm->clpf && !cm->skip_loop_filter)
- vp10_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+ av1_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
#endif
#if CONFIG_DERING
if (cm->dering_level && !cm->skip_loop_filter) {
- vp10_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
+ av1_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
}
#endif // CONFIG_DERING
if (cm->frame_parallel_decode)
- vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
+ av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
#if CONFIG_EXT_TILE
if (n_tiles == 1) {
@@ -2790,7 +2779,7 @@
return data_end;
#else
// Find the end of the single tile buffer
- return vpx_reader_find_end(&pbi->tile_data->bit_reader);
+ return aom_reader_find_end(&pbi->tile_data->bit_reader);
#endif // CONFIG_ANS
} else {
// Return the end of the last tile buffer
@@ -2803,7 +2792,7 @@
{
// Get last tile data.
TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
- return vpx_reader_find_end(&td->bit_reader);
+ return aom_reader_find_end(&td->bit_reader);
}
#endif // CONFIG_ANS
#endif // CONFIG_EXT_TILE
@@ -2811,8 +2800,8 @@
static int tile_worker_hook(TileWorkerData *const tile_data,
const TileInfo *const tile) {
- VP10Decoder *const pbi = tile_data->pbi;
- const VP10_COMMON *const cm = &pbi->common;
+ AV1Decoder *const pbi = tile_data->pbi;
+ const AV1_COMMON *const cm = &pbi->common;
int mi_row, mi_col;
if (setjmp(tile_data->error_info.jmp)) {
@@ -2824,11 +2813,11 @@
tile_data->error_info.setjmp = 1;
tile_data->xd.error_info = &tile_data->error_info;
- vp10_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
+ av1_zero_above_context(&pbi->common, tile->mi_col_start, tile->mi_col_end);
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += cm->mib_size) {
- vp10_zero_left_context(&tile_data->xd);
+ av1_zero_left_context(&tile_data->xd);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += cm->mib_size) {
@@ -2850,20 +2839,20 @@
return (int)(buf2->size - buf1->size);
}
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end) {
- VP10_COMMON *const cm = &pbi->common;
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ AV1_COMMON *const cm = &pbi->common;
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
const int tile_cols = cm->tile_cols;
const int tile_rows = cm->tile_rows;
- const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols);
+ const int num_workers = AOMMIN(pbi->max_threads & ~1, tile_cols);
TileBufferDec (*const tile_buffers)[MAX_TILE_COLS] = pbi->tile_buffers;
#if CONFIG_EXT_TILE
- const int dec_tile_row = VPXMIN(pbi->dec_tile_row, tile_rows);
+ const int dec_tile_row = AOMMIN(pbi->dec_tile_row, tile_rows);
const int single_row = pbi->dec_tile_row >= 0;
const int tile_rows_start = single_row ? dec_tile_row : 0;
const int tile_rows_end = single_row ? dec_tile_row + 1 : tile_rows;
- const int dec_tile_col = VPXMIN(pbi->dec_tile_col, tile_cols);
+ const int dec_tile_col = AOMMIN(pbi->dec_tile_col, tile_cols);
const int single_col = pbi->dec_tile_col >= 0;
const int tile_cols_start = single_col ? dec_tile_col : 0;
const int tile_cols_end = single_col ? tile_cols_start + 1 : tile_cols;
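
A note on the worker count chosen above: pbi->max_threads & ~1 clears the low bit to keep an even number of threads, and AOMMIN then caps it at the tile-column count. A quick standalone check of that arithmetic (input values are illustrative):

#include <assert.h>

int main(void) {
  const int max_threads = 7, tile_cols = 4;  /* assumed inputs */
  const int even_threads = max_threads & ~1; /* 7 -> 6 */
  const int num_workers =
      even_threads < tile_cols ? even_threads : tile_cols; /* AOMMIN(6, 4) */
  assert(num_workers == 4);
  return 0;
}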
@@ -2895,22 +2884,22 @@
if (pbi->num_tile_workers == 0) {
const int num_threads = pbi->max_threads & ~1;
CHECK_MEM_ERROR(cm, pbi->tile_workers,
- vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
+ aom_malloc(num_threads * sizeof(*pbi->tile_workers)));
// Ensure tile data offsets will be properly aligned. This may fail on
// platforms without DECLARE_ALIGNED().
assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
CHECK_MEM_ERROR(
cm, pbi->tile_worker_data,
- vpx_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
+ aom_memalign(32, num_threads * sizeof(*pbi->tile_worker_data)));
CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
- vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
+ aom_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
for (i = 0; i < num_threads; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
++pbi->num_tile_workers;
winterface->init(worker);
if (i < num_threads - 1 && !winterface->reset(worker)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Tile decoder thread creation failed");
}
}
@@ -2918,9 +2907,9 @@
// Reset tile decoding hook
for (i = 0; i < num_workers; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
winterface->sync(worker);
- worker->hook = (VPxWorkerHook)tile_worker_hook;
+ worker->hook = (AVxWorkerHook)tile_worker_hook;
worker->data1 = &pbi->tile_worker_data[i];
worker->data2 = &pbi->tile_worker_info[i];
}
@@ -2929,7 +2918,7 @@
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_zero(twd->counts);
+ av1_zero(twd->counts);
}
}
@@ -2950,7 +2939,7 @@
int group_start;
for (group_start = tile_cols_start; group_start < tile_cols_end;
group_start += num_workers) {
- const int group_end = VPXMIN(group_start + num_workers, tile_cols);
+ const int group_end = AOMMIN(group_start + num_workers, tile_cols);
const TileBufferDec largest = tile_buffers[tile_row][group_start];
memmove(&tile_buffers[tile_row][group_start],
&tile_buffers[tile_row][group_start + 1],
@@ -2964,7 +2953,7 @@
for (i = 0; i < num_workers && tile_col < tile_cols_end;
++i, ++tile_col) {
TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
- VPxWorker *const worker = &pbi->tile_workers[i];
+ AVxWorker *const worker = &pbi->tile_workers[i];
TileWorkerData *const twd = (TileWorkerData *)worker->data1;
TileInfo *const tile_info = (TileInfo *)worker->data2;
@@ -2975,9 +2964,9 @@
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
? &twd->counts
: NULL;
- vp10_zero(twd->dqcoeff);
- vp10_tile_init(tile_info, cm, tile_row, buf->col);
- vp10_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
+ av1_zero(twd->dqcoeff);
+ av1_tile_init(tile_info, cm, tile_row, buf->col);
+ av1_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
#if !CONFIG_ANS
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
&twd->bit_reader, pbi->decrypt_cb,
@@ -2987,7 +2976,7 @@
&twd->bit_reader, pbi->decrypt_cb,
pbi->decrypt_state);
#endif // CONFIG_ANS
- vp10_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
+ av1_init_macroblockd(cm, &twd->xd, twd->dqcoeff);
twd->xd.plane[0].color_index_map = twd->color_index_map[0];
twd->xd.plane[1].color_index_map = twd->color_index_map[1];
@@ -3007,9 +2996,9 @@
// Sync all workers
for (; i > 0; --i) {
- VPxWorker *const worker = &pbi->tile_workers[i - 1];
+ AVxWorker *const worker = &pbi->tile_workers[i - 1];
// TODO(jzern): The tile may have specific error data associated with
- // its vpx_internal_error_info which could be propagated to the main
+ // its aom_internal_error_info which could be propagated to the main
// info in cm. Additionally once the threads have been synced and an
// error is detected, there's no point in continuing to decode tiles.
pbi->mb.corrupted |= !winterface->sync(worker);
@@ -3021,7 +3010,7 @@
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
for (i = 0; i < num_workers; ++i) {
TileWorkerData *const twd = (TileWorkerData *)pbi->tile_workers[i].data1;
- vp10_accumulate_frame_counts(cm, &twd->counts);
+ av1_accumulate_frame_counts(cm, &twd->counts);
}
}
@@ -3036,42 +3025,42 @@
{
TileWorkerData *const twd =
(TileWorkerData *)pbi->tile_workers[final_worker].data1;
- return vpx_reader_find_end(&twd->bit_reader);
+ return aom_reader_find_end(&twd->bit_reader);
}
#endif // CONFIG_ANS
#endif // CONFIG_EXT_TILE
}
static void error_handler(void *data) {
- VP10_COMMON *const cm = (VP10_COMMON *)data;
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
+ AV1_COMMON *const cm = (AV1_COMMON *)data;
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet");
}
-static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
- struct vpx_read_bit_buffer *rb) {
+static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
+ struct aom_read_bit_buffer *rb) {
if (cm->profile >= PROFILE_2) {
- cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->bit_depth = aom_rb_read_bit(rb) ? AOM_BITS_12 : AOM_BITS_10;
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 1;
#endif
} else {
- cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->bit_depth = AOM_BITS_8;
+#if CONFIG_AOM_HIGHBITDEPTH
cm->use_highbitdepth = 0;
#endif
}
- cm->color_space = vpx_rb_read_literal(rb, 3);
- if (cm->color_space != VPX_CS_SRGB) {
+ cm->color_space = aom_rb_read_literal(rb, 3);
+ if (cm->color_space != AOM_CS_SRGB) {
// [16,235] (including xvycc) vs [0,255] range
- cm->color_range = vpx_rb_read_bit(rb);
+ cm->color_range = aom_rb_read_bit(rb);
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
- cm->subsampling_x = vpx_rb_read_bit(rb);
- cm->subsampling_y = vpx_rb_read_bit(rb);
+ cm->subsampling_x = aom_rb_read_bit(rb);
+ cm->subsampling_y = aom_rb_read_bit(rb);
if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"4:2:0 color not supported in profile 1 or 3");
- if (vpx_rb_read_bit(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_bit(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Reserved bit set");
} else {
cm->subsampling_y = cm->subsampling_x = 1;
@@ -3081,19 +3070,19 @@
// Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
// 4:2:2 or 4:4:0 chroma sampling is not allowed.
cm->subsampling_y = cm->subsampling_x = 0;
- if (vpx_rb_read_bit(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_bit(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Reserved bit set");
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"4:4:4 color not supported in profile 0 or 2");
}
}
}
-static size_t read_uncompressed_header(VP10Decoder *pbi,
- struct vpx_read_bit_buffer *rb) {
- VP10_COMMON *const cm = &pbi->common;
+static size_t read_uncompressed_header(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *rb) {
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = pool->frame_bufs;
@@ -3111,31 +3100,31 @@
cm->is_reference_frame = 1;
#endif // CONFIG_EXT_REFS
- if (vpx_rb_read_literal(rb, 2) != VPX_FRAME_MARKER)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame marker");
- cm->profile = vp10_read_profile(rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+ cm->profile = av1_read_profile(rb);
+#if CONFIG_AOM_HIGHBITDEPTH
if (cm->profile >= MAX_PROFILES)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
#else
if (cm->profile >= PROFILE_2)
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
#endif
- cm->show_existing_frame = vpx_rb_read_bit(rb);
+ cm->show_existing_frame = aom_rb_read_bit(rb);
if (cm->show_existing_frame) {
// Show an existing frame directly.
- const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
+ const int frame_to_show = cm->ref_frame_map[aom_rb_read_literal(rb, 3)];
lock_buffer_pool(pool);
if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
unlock_buffer_pool(pool);
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a decoded frame",
frame_to_show);
}
@@ -3154,13 +3143,13 @@
return 0;
}
- cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb);
- cm->show_frame = vpx_rb_read_bit(rb);
- cm->error_resilient_mode = vpx_rb_read_bit(rb);
+ cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
+ cm->show_frame = aom_rb_read_bit(rb);
+ cm->error_resilient_mode = aom_rb_read_bit(rb);
if (cm->frame_type == KEY_FRAME) {
- if (!vp10_read_sync_code(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_read_sync_code(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
read_bitdepth_colorspace_sampling(cm, rb);
@@ -3177,43 +3166,43 @@
pbi->need_resync = 0;
}
if (frame_is_intra_only(cm))
- cm->allow_screen_content_tools = vpx_rb_read_bit(rb);
+ cm->allow_screen_content_tools = aom_rb_read_bit(rb);
} else {
- cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
+ cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
if (cm->error_resilient_mode) {
cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
} else {
if (cm->intra_only) {
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_ALL
: RESET_FRAME_CONTEXT_CURRENT;
} else {
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_CURRENT
: RESET_FRAME_CONTEXT_NONE;
if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
- cm->reset_frame_context = vpx_rb_read_bit(rb)
+ cm->reset_frame_context = aom_rb_read_bit(rb)
? RESET_FRAME_CONTEXT_ALL
: RESET_FRAME_CONTEXT_CURRENT;
}
}
if (cm->intra_only) {
- if (!vp10_read_sync_code(rb))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ if (!av1_read_sync_code(rb))
+ aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
read_bitdepth_colorspace_sampling(cm, rb);
- pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
setup_frame_size(cm, rb);
if (pbi->need_resync) {
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
}
} else if (pbi->need_resync != 1) { /* Skip if need resync */
- pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
+ pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
#if CONFIG_EXT_REFS
if (!pbi->refresh_frame_flags) {
@@ -3224,35 +3213,35 @@
#endif // CONFIG_EXT_REFS
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
- const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
+ const int ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
const int idx = cm->ref_frame_map[ref];
RefBuffer *const ref_frame = &cm->frame_refs[i];
ref_frame->idx = idx;
ref_frame->buf = &frame_bufs[idx].buf;
- cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
}
setup_frame_size_with_refs(cm, rb);
- cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
+ cm->allow_high_precision_mv = aom_rb_read_bit(rb);
cm->interp_filter = read_interp_filter(rb);
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
-#if CONFIG_VP9_HIGHBITDEPTH
- vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height,
cm->use_highbitdepth);
#else
- vp10_setup_scale_factors_for_frame(
+ av1_setup_scale_factors_for_frame(
&ref_buf->sf, ref_buf->buf->y_crop_width,
ref_buf->buf->y_crop_height, cm->width, cm->height);
#endif
}
}
}
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
#endif
get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -3261,22 +3250,22 @@
get_frame_new_buffer(cm)->render_height = cm->render_height;
if (pbi->need_resync) {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Keyframe / intra-only frame required to reset decoder"
" state");
}
if (!cm->error_resilient_mode) {
- cm->refresh_frame_context = vpx_rb_read_bit(rb)
+ cm->refresh_frame_context = aom_rb_read_bit(rb)
? REFRESH_FRAME_CONTEXT_FORWARD
: REFRESH_FRAME_CONTEXT_BACKWARD;
} else {
cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
}
- // This flag will be overridden by the call to vp10_setup_past_independence
+ // This flag will be overridden by the call to av1_setup_past_independence
// below, forcing the use of context 0 for those frame types.
- cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+ cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
// Generate next_ref_frame_map.
lock_buffer_pool(pool);
@@ -3304,10 +3293,10 @@
pbi->hold_ref_buf = 1;
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
- vp10_setup_past_independence(cm);
+ av1_setup_past_independence(cm);
#if CONFIG_EXT_PARTITION
- set_sb_size(cm, vpx_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
+ set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
#else
set_sb_size(cm, BLOCK_64X64);
#endif // CONFIG_EXT_PARTITION
@@ -3323,12 +3312,12 @@
setup_restoration(cm, rb);
#endif // CONFIG_LOOP_RESTORATION
setup_quantization(cm, rb);
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
xd->bd = (int)cm->bit_depth;
#endif
#if CONFIG_ENTROPY
- vp10_default_coef_probs(cm);
+ av1_default_coef_probs(cm);
if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
@@ -3343,7 +3332,7 @@
int i;
for (i = 0; i < MAX_SEGMENTS; ++i) {
const int qindex = cm->seg.enabled
- ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+ ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
: cm->base_qindex;
xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -3356,36 +3345,36 @@
cm->reference_mode = read_frame_reference_mode(cm, rb);
read_tile_info(pbi, rb);
- sz = vpx_rb_read_literal(rb, 16);
+ sz = aom_rb_read_literal(rb, 16);
if (sz == 0)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Invalid header size");
return sz;
}
#if CONFIG_EXT_TX
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_inter_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < num_ext_tx_set_inter[s] - 1; ++j)
- vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
+ av1_diff_update_prob(r, &fc->inter_ext_tx_prob[s][i][j]);
}
}
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
if (!use_intra_ext_tx_for_txsize[s][i]) continue;
for (j = 0; j < INTRA_MODES; ++j)
for (k = 0; k < num_ext_tx_set_intra[s] - 1; ++k)
- vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
+ av1_diff_update_prob(r, &fc->intra_ext_tx_prob[s][i][j][k]);
}
}
}
@@ -3393,31 +3382,31 @@
#else
-static void read_ext_tx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j)
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
+ av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
}
}
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
- vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
+ av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
}
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_SUPERTX
-static void read_supertx_probs(FRAME_CONTEXT *fc, vp10_reader *r) {
+static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
- if (vp10_read(r, GROUP_DIFF_UPDATE_PROB)) {
+ if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
for (j = 1; j < TX_SIZES; ++j) {
- vp10_diff_update_prob(r, &fc->supertx_prob[i][j]);
+ av1_diff_update_prob(r, &fc->supertx_prob[i][j]);
}
}
}
@@ -3426,44 +3415,44 @@
#if CONFIG_GLOBAL_MOTION
static void read_global_motion_params(Global_Motion_Params *params,
- vpx_prob *probs, vp10_reader *r) {
+ aom_prob *probs, aom_reader *r) {
GLOBAL_MOTION_TYPE gmtype =
- vp10_read_tree(r, vp10_global_motion_types_tree, probs);
+ aom_read_tree(r, av1_global_motion_types_tree, probs);
params->gmtype = gmtype;
params->motion_params.wmtype = gm_to_trans_type(gmtype);
switch (gmtype) {
case GLOBAL_ZERO: break;
case GLOBAL_AFFINE:
params->motion_params.wmmat[4] =
- (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR);
params->motion_params.wmmat[5] =
- vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR +
(1 << WARPEDMODEL_PREC_BITS);
// fallthrough intended
case GLOBAL_ROTZOOM:
params->motion_params.wmmat[2] =
- (vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ (aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR) +
(1 << WARPEDMODEL_PREC_BITS);
params->motion_params.wmmat[3] =
- vp10_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_ALPHA_BITS) *
GM_ALPHA_DECODE_FACTOR;
// fallthrough intended
case GLOBAL_TRANSLATION:
params->motion_params.wmmat[0] =
- vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
GM_TRANS_DECODE_FACTOR;
params->motion_params.wmmat[1] =
- vp10_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
+ aom_read_primitive_symmetric(r, GM_ABS_TRANS_BITS) *
GM_TRANS_DECODE_FACTOR;
break;
default: assert(0);
}
}
-static void read_global_motion(VP10_COMMON *cm, vp10_reader *r) {
+static void read_global_motion(AV1_COMMON *cm, aom_reader *r) {
int frame;
memset(cm->global_motion, 0, sizeof(cm->global_motion));
for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
@@ -3473,24 +3462,24 @@
}
#endif // CONFIG_GLOBAL_MOTION
-static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
+static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
size_t partition_size) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
#if CONFIG_SUPERTX
MACROBLOCKD *const xd = &pbi->mb;
#endif
FRAME_CONTEXT *const fc = cm->fc;
- vp10_reader r;
+ aom_reader r;
int k, i, j;
#if !CONFIG_ANS
- if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
+ if (aom_reader_init(&r, data, partition_size, pbi->decrypt_cb,
pbi->decrypt_state))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
#else
if (ans_read_init(&r, data, partition_size))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate compressed header ANS decoder");
#endif // !CONFIG_ANS
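
read_compressed_header() above selects its entropy reader at compile time: the single aom_reader name is backed either by the bool decoder (aom_reader_init) or, under CONFIG_ANS, by the ANS decoder (ans_read_init), and the rest of the function is backend-agnostic. A toy sketch of that macro-plug pattern (the toy_* names are hypothetical, not part of this tree):

#include <stdio.h>

#define TOY_CONFIG_ANS 0 /* flip to 1 to swap in the other backend */

#if TOY_CONFIG_ANS
struct toy_ans { int pos; };
#define toy_reader struct toy_ans
/* Backend A: returns the low bit of an advancing position. */
static int toy_ans_read_bit(toy_reader *r) { return r->pos++ & 1; }
#define toy_read_bit toy_ans_read_bit
#else
struct toy_bool { int pos; };
#define toy_reader struct toy_bool
/* Backend B: same interface, different bit pattern. */
static int toy_bool_read_bit(toy_reader *r) { return (r->pos++ >> 1) & 1; }
#define toy_read_bit toy_bool_read_bit
#endif

int main(void) {
  toy_reader r = { 0 };
  int i;
  /* Caller code is identical whichever backend is compiled in. */
  for (i = 0; i < 4; ++i) printf("%d", toy_read_bit(&r));
  printf("\n");
  return 0;
}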
@@ -3498,56 +3487,56 @@
for (i = 0; i < TX_SIZES - 1; ++i)
for (j = 0; j < TX_SIZE_CONTEXTS; ++j)
for (k = 0; k < i + 1; ++k)
- vp10_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
+ av1_diff_update_prob(&r, &fc->tx_size_probs[i][j][k]);
}
read_coef_probs(fc, cm->tx_mode, &r);
#if CONFIG_VAR_TX
for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
- vp10_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
+ av1_diff_update_prob(&r, &fc->txfm_partition_prob[k]);
#endif
for (k = 0; k < SKIP_CONTEXTS; ++k)
- vp10_diff_update_prob(&r, &fc->skip_probs[k]);
+ av1_diff_update_prob(&r, &fc->skip_probs[k]);
if (cm->seg.enabled && cm->seg.update_map) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
}
for (k = 0; k < MAX_SEGMENTS - 1; k++)
- vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
+ av1_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
}
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
#if CONFIG_EXT_PARTITION_TYPES
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[0][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[0][i]);
for (j = 1; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < EXT_PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#else
for (j = 0; j < PARTITION_CONTEXTS; ++j)
for (i = 0; i < PARTITION_TYPES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
for (j = 0; j < INTRA_FILTERS - 1; ++j)
- vp10_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
+ av1_diff_update_prob(&r, &fc->intra_filter_probs[i][j]);
#endif // CONFIG_EXT_INTRA
if (frame_is_intra_only(cm)) {
- vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+ av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
+ av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
@@ -3560,23 +3549,23 @@
if (cm->reference_mode != COMPOUND_REFERENCE) {
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
if (is_interintra_allowed_bsize_group(i)) {
- vp10_diff_update_prob(&r, &fc->interintra_prob[i]);
+ av1_diff_update_prob(&r, &fc->interintra_prob[i]);
}
}
for (i = 0; i < BLOCK_SIZE_GROUPS; i++) {
for (j = 0; j < INTERINTRA_MODES - 1; j++)
- vp10_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
+ av1_diff_update_prob(&r, &fc->interintra_mode_prob[i][j]);
}
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) {
- vp10_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
+ av1_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
}
}
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interinter_wedge_used(i)) {
- vp10_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
+ av1_diff_update_prob(&r, &fc->wedge_interinter_prob[i]);
}
}
}
@@ -3585,14 +3574,14 @@
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i) {
for (j = 0; j < MOTION_VARIATIONS - 1; ++j)
- vp10_diff_update_prob(&r, &fc->motvar_prob[i][j]);
+ av1_diff_update_prob(&r, &fc->motvar_prob[i][j]);
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+ av1_diff_update_prob(&r, &fc->intra_inter_prob[i]);
if (cm->reference_mode != SINGLE_REFERENCE)
setup_compound_reference_mode(cm);
@@ -3601,7 +3590,7 @@
for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
for (i = 0; i < INTRA_MODES - 1; ++i)
- vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+ av1_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
@@ -3618,7 +3607,7 @@
#endif // CONFIG_GLOBAL_MOTION
}
- return vp10_reader_has_error(&r);
+ return aom_reader_has_error(&r);
}
#ifdef NDEBUG
@@ -3626,9 +3615,9 @@
#else // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
-static void debug_check_frame_counts(const VP10_COMMON *const cm) {
+static void debug_check_frame_counts(const AV1_COMMON *const cm) {
FRAME_COUNTS zero_counts;
- vp10_zero(zero_counts);
+ av1_zero(zero_counts);
assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
cm->error_resilient_mode);
assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
@@ -3689,14 +3678,14 @@
}
#endif // NDEBUG
-static struct vpx_read_bit_buffer *init_read_bit_buffer(
- VP10Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data,
- const uint8_t *data_end, uint8_t clear_data[MAX_VPX_HEADER_SIZE]) {
+static struct aom_read_bit_buffer *init_read_bit_buffer(
+ AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
+ const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
if (pbi->decrypt_cb) {
- const int n = (int)VPXMIN(MAX_VPX_HEADER_SIZE, data_end - data);
+ const int n = (int)AOMMIN(MAX_AV1_HEADER_SIZE, data_end - data);
pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
rb->bit_buffer = clear_data;
rb->bit_buffer_end = clear_data + n;
@@ -3709,32 +3698,32 @@
//------------------------------------------------------------------------------
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) {
- return vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
- vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
- vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
+ return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
+ aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
}
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
- int *height) {
- *width = vpx_rb_read_literal(rb, 16) + 1;
- *height = vpx_rb_read_literal(rb, 16) + 1;
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+ int *height) {
+ *width = aom_rb_read_literal(rb, 16) + 1;
+ *height = aom_rb_read_literal(rb, 16) + 1;
}
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
- int profile = vpx_rb_read_bit(rb);
- profile |= vpx_rb_read_bit(rb) << 1;
- if (profile > 2) profile += vpx_rb_read_bit(rb);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
+ int profile = aom_rb_read_bit(rb);
+ profile |= aom_rb_read_bit(rb) << 1;
+ if (profile > 2) profile += aom_rb_read_bit(rb);
return (BITSTREAM_PROFILE)profile;
}
-void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end, const uint8_t **p_data_end) {
- VP10_COMMON *const cm = &pbi->common;
+void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end) {
+ AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- struct vpx_read_bit_buffer rb;
+ struct aom_read_bit_buffer rb;
int context_updated = 0;
- uint8_t clear_data[MAX_VPX_HEADER_SIZE];
+ uint8_t clear_data[MAX_AV1_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(
pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
@@ -3747,7 +3736,7 @@
// showing a frame directly
#if CONFIG_EXT_REFS
if (cm->show_existing_frame)
- *p_data_end = data + vpx_rb_bytes_read(&rb);
+ *p_data_end = data + aom_rb_bytes_read(&rb);
else
#endif // CONFIG_EXT_REFS
*p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
@@ -3755,9 +3744,9 @@
return;
}
- data += vpx_rb_bytes_read(&rb);
+ data += aom_rb_bytes_read(&rb);
if (!read_is_valid(data, first_partition_size, data_end))
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt header length");
cm->use_prev_frame_mvs =
@@ -3782,46 +3771,46 @@
}
#endif // CONFIG_EXT_REFS
- vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+ av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
if (!cm->fc->initialized)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Uninitialized entropy context.");
- vp10_zero(cm->counts);
+ av1_zero(cm->counts);
xd->corrupted = 0;
new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
if (new_fb->corrupted)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data header is corrupted.");
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
+ av1_loop_filter_frame_init(cm, cm->lf.filter_level);
}
// If encoded in frame parallel mode, frame context is ready after decoding
// the frame header.
if (cm->frame_parallel_decode &&
cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
- VPxWorker *const worker = pbi->frame_worker_owner;
+ AVxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
context_updated = 1;
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
frame_worker_data->frame_context_ready = 1;
// Signal the main thread that context is ready.
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
}
#if CONFIG_ENTROPY
- vp10_copy(cm->starting_coef_probs, cm->fc->coef_probs);
+ av1_copy(cm->starting_coef_probs, cm->fc->coef_probs);
cm->coef_probs_update_idx = 0;
#endif // CONFIG_ENTROPY
@@ -3836,12 +3825,12 @@
if (!cm->skip_loop_filter) {
// If multiple threads are used to decode tiles, then we use those
// threads to do parallel loopfiltering.
- vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
- cm->lf.filter_level, 0, 0, pbi->tile_workers,
- pbi->num_tile_workers, &pbi->lf_row_sync);
+ av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
+ 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+ &pbi->lf_row_sync);
}
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
}
} else {
@@ -3849,10 +3838,10 @@
}
#if CONFIG_LOOP_RESTORATION
if (cm->rst_info.restoration_type != RESTORE_NONE) {
- vp10_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
- cm->frame_type == KEY_FRAME, cm->width,
- cm->height);
- vp10_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
+ av1_loop_restoration_init(&cm->rst_internal, &cm->rst_info,
+ cm->frame_type == KEY_FRAME, cm->width,
+ cm->height);
+ av1_loop_restoration_rows(new_fb, cm, 0, cm->mi_rows, 0);
}
#endif // CONFIG_LOOP_RESTORATION
@@ -3861,18 +3850,18 @@
#if CONFIG_ENTROPY
cm->partial_prob_update = 0;
#endif // CONFIG_ENTROPY
- vp10_adapt_coef_probs(cm);
- vp10_adapt_intra_frame_probs(cm);
+ av1_adapt_coef_probs(cm);
+ av1_adapt_intra_frame_probs(cm);
if (!frame_is_intra_only(cm)) {
- vp10_adapt_inter_frame_probs(cm);
- vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+ av1_adapt_inter_frame_probs(cm);
+ av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
}
} else {
debug_check_frame_counts(cm);
}
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
}
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index 7fdff0b..020c424 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -8,26 +8,26 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODEFRAME_H_
-#define VP10_DECODER_DECODEFRAME_H_
+#ifndef AV1_DECODER_DECODEFRAME_H_
+#define AV1_DECODER_DECODEFRAME_H_
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Decoder;
-struct vpx_read_bit_buffer;
+struct AV1Decoder;
+struct aom_read_bit_buffer;
-int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, int *width,
- int *height);
-BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb);
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+ int *height);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
-void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end, const uint8_t **p_data_end);
+void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end, const uint8_t **p_data_end);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEFRAME_H_
+#endif // AV1_DECODER_DECODEFRAME_H_
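
The decodemv.c hunks below touch read_uniform(), which decodes a quasi-uniform code: with l = get_unsigned_bits(n) and m = (1 << l) - n, the first m symbols take l - 1 bits and the remaining n - m take l bits. A small counting sketch, under the assumption that get_unsigned_bits(n) is the smallest l with (1 << l) >= n:

#include <assert.h>
#include <stdio.h>

/* Assumed stand-in for get_unsigned_bits(): smallest l with (1 << l) >= n. */
static int get_unsigned_bits_sketch(unsigned n) {
  int l = 0;
  while ((1u << l) < n) ++l;
  return l;
}

/* Bits the quasi-uniform code spends on symbol v out of an alphabet of n. */
static int quasi_uniform_bits(int v, int n) {
  const int l = get_unsigned_bits_sketch((unsigned)n);
  const int m = (1 << l) - n;
  return v < m ? l - 1 : l;
}

int main(void) {
  /* n = 5: l = 3, m = 3; symbols 0..2 take 2 bits, symbols 3..4 take 3. */
  int v, total = 0;
  for (v = 0; v < 5; ++v) total += quasi_uniform_bits(v, 5);
  printf("total bits for n = 5: %d\n", total); /* 2+2+2+3+3 = 12 */
  assert(total == 12);
  return 0;
}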
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index ef776a0..47cfea6 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -22,27 +22,27 @@
#include "av1/decoder/decodemv.h"
#include "av1/decoder/decodeframe.h"
-#include "aom_dsp/vpx_dsp_common.h"
+#include "aom_dsp/aom_dsp_common.h"
-static INLINE int read_uniform(vp10_reader *r, int n) {
+static INLINE int read_uniform(aom_reader *r, int n) {
int l = get_unsigned_bits(n);
int m = (1 << l) - n;
- int v = vp10_read_literal(r, l - 1);
+ int v = aom_read_literal(r, l - 1);
assert(l != 0);
if (v < m)
return v;
else
- return (v << 1) - m + vp10_read_literal(r, 1);
+ return (v << 1) - m + aom_read_literal(r, 1);
}
-static PREDICTION_MODE read_intra_mode(vp10_reader *r, const vpx_prob *p) {
- return (PREDICTION_MODE)vp10_read_tree(r, vp10_intra_mode_tree, p);
+static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
+ return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
}
-static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r, int size_group) {
+static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int size_group) {
const PREDICTION_MODE y_mode =
read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
@@ -50,8 +50,8 @@
return y_mode;
}
-static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r,
+static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r,
PREDICTION_MODE y_mode) {
const PREDICTION_MODE uv_mode =
read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]);
@@ -61,27 +61,27 @@
}
#if CONFIG_EXT_INTER
-static INTERINTRA_MODE read_interintra_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r, int size_group) {
- const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)vp10_read_tree(
- r, vp10_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
+static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int size_group) {
+ const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree(
+ r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->interintra_mode[size_group][ii_mode];
return ii_mode;
}
#endif // CONFIG_EXT_INTER
-static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_inter_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_REF_MV && CONFIG_EXT_INTER
MB_MODE_INFO *mbmi,
#endif
- vp10_reader *r, int16_t ctx) {
+ aom_reader *r, int16_t ctx) {
#if CONFIG_REF_MV
FRAME_COUNTS *counts = xd->counts;
int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
- vpx_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
+ aom_prob mode_prob = cm->fc->newmv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->newmv_mode[mode_ctx][0];
#if CONFIG_EXT_INTER
@@ -91,7 +91,7 @@
#if CONFIG_EXT_INTER
} else {
mode_prob = cm->fc->new2mv_prob;
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->new2mv_mode[0];
return NEWMV;
} else {
@@ -108,7 +108,7 @@
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
mode_prob = cm->fc->zeromv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->zeromv_mode[mode_ctx][0];
return ZEROMV;
}
@@ -122,7 +122,7 @@
mode_prob = cm->fc->refmv_prob[mode_ctx];
- if (vp10_read(r, mode_prob) == 0) {
+ if (aom_read(r, mode_prob) == 0) {
if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
@@ -135,7 +135,7 @@
assert(0);
#else
const int mode =
- vp10_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+ aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_mode[ctx][mode];
@@ -144,18 +144,18 @@
}
#if CONFIG_REF_MV
-static void read_drl_idx(const VP10_COMMON *cm, MACROBLOCKD *xd,
- MB_MODE_INFO *mbmi, vp10_reader *r) {
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+ MB_MODE_INFO *mbmi, aom_reader *r) {
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
if (mbmi->mode == NEWMV) {
int idx;
for (idx = 0; idx < 2; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
- uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!vp10_read(r, drl_prob)) {
+ uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ if (!aom_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -173,9 +173,9 @@
// mode is factored in.
for (idx = 1; idx < 3; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
- uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- vpx_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!vp10_read(r, drl_prob)) {
+ uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
+ aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
+ if (!aom_read(r, drl_prob)) {
mbmi->ref_mv_idx = idx - 1;
if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
return;
@@ -189,11 +189,10 @@
#endif
#if CONFIG_EXT_INTER
-static PREDICTION_MODE read_inter_compound_mode(VP10_COMMON *cm,
- MACROBLOCKD *xd, vp10_reader *r,
- int16_t ctx) {
- const int mode = vp10_read_tree(r, vp10_inter_compound_mode_tree,
- cm->fc->inter_compound_mode_probs[ctx]);
+static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r, int16_t ctx) {
+ const int mode = aom_read_tree(r, av1_inter_compound_mode_tree,
+ cm->fc->inter_compound_mode_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_compound_mode[ctx][mode];
@@ -203,16 +202,16 @@
}
#endif // CONFIG_EXT_INTER
-static int read_segment_id(vp10_reader *r,
+static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
- return vp10_read_tree(r, vp10_segment_tree, segp->tree_probs);
+ return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
}
#if CONFIG_VAR_TX
-static void read_tx_size_vartx(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, FRAME_COUNTS *counts,
TX_SIZE tx_size, int blk_row, int blk_col,
- vp10_reader *r) {
+ aom_reader *r) {
int is_split = 0;
const int tx_row = blk_row >> 1;
const int tx_col = blk_col >> 1;
@@ -229,7 +228,7 @@
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
- is_split = vp10_read(r, cm->fc->txfm_partition_prob[ctx]);
+ is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx]);
if (is_split) {
BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
@@ -268,18 +267,18 @@
}
#endif
-static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
- int tx_size_cat, vp10_reader *r) {
+static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int tx_size_cat, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
- int tx_size = vp10_read_tree(r, vp10_tx_size_tree[tx_size_cat],
- cm->fc->tx_size_probs[tx_size_cat][ctx]);
+ int tx_size = aom_read_tree(r, av1_tx_size_tree[tx_size_cat],
+ cm->fc->tx_size_probs[tx_size_cat][ctx]);
if (counts) ++counts->tx_size[tx_size_cat][ctx][tx_size];
return (TX_SIZE)tx_size;
}
-static TX_SIZE read_tx_size_intra(VP10_COMMON *cm, MACROBLOCKD *xd,
- vp10_reader *r) {
+static TX_SIZE read_tx_size_intra(AV1_COMMON *cm, MACROBLOCKD *xd,
+ aom_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -297,8 +296,8 @@
}
}
-static TX_SIZE read_tx_size_inter(VP10_COMMON *cm, MACROBLOCKD *xd,
- int allow_select, vp10_reader *r) {
+static TX_SIZE read_tx_size_inter(AV1_COMMON *cm, MACROBLOCKD *xd,
+ int allow_select, aom_reader *r) {
TX_MODE tx_mode = cm->tx_mode;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
@@ -328,20 +327,20 @@
}
}
-static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
+static int dec_get_segment_id(const AV1_COMMON *cm, const uint8_t *segment_ids,
int mi_offset, int x_mis, int y_mis) {
int x, y, segment_id = INT_MAX;
for (y = 0; y < y_mis; y++)
for (x = 0; x < x_mis; x++)
segment_id =
- VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
+ AOMMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);
assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
return segment_id;
}
-static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+static void set_segment_id(AV1_COMMON *cm, int mi_offset, int x_mis, int y_mis,
int segment_id) {
int x, y;
@@ -352,9 +351,9 @@
cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}
-static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int mi_offset, int x_mis, int y_mis,
- vp10_reader *r) {
+ aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
@@ -370,7 +369,7 @@
return segment_id;
}
-static void copy_segment_id(const VP10_COMMON *cm,
+static void copy_segment_id(const AV1_COMMON *cm,
const uint8_t *last_segment_ids,
uint8_t *current_segment_ids, int mi_offset,
int x_mis, int y_mis) {
@@ -383,8 +382,8 @@
: 0;
}
-static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- int mi_row, int mi_col, vp10_reader *r) {
+static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int mi_row, int mi_col, aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
@@ -395,8 +394,8 @@
const int bh = num_8x8_blocks_high_lookup[mbmi->sb_type];
// TODO(slavarnway): move x_mis, y_mis into xd ?????
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
if (!seg->enabled) return 0; // Default for disabled segmentation
@@ -412,9 +411,9 @@
}
if (seg->temporal_update) {
- const int ctx = vp10_get_pred_context_seg_id(xd);
- const vpx_prob pred_prob = segp->pred_probs[ctx];
- mbmi->seg_id_predicted = vp10_read(r, pred_prob);
+ const int ctx = av1_get_pred_context_seg_id(xd);
+ const aom_prob pred_prob = segp->pred_probs[ctx];
+ mbmi->seg_id_predicted = aom_read(r, pred_prob);
if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
@@ -430,21 +429,21 @@
return segment_id;
}
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
- vp10_reader *r) {
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+ aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
return 1;
} else {
- const int ctx = vp10_get_skip_context(xd);
- const int skip = vp10_read(r, cm->fc->skip_probs[ctx]);
+ const int ctx = av1_get_skip_context(xd);
+ const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
}
}
-static void read_palette_mode_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r) {
+static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
@@ -458,16 +457,16 @@
palette_ctx += (above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
if (left_mi)
palette_ctx += (left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- if (vp10_read(
+ if (aom_read(
r,
- vp10_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
+ av1_default_palette_y_mode_prob[bsize - BLOCK_8X8][palette_ctx])) {
pmi->palette_size[0] =
- vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
+ aom_read_tree(r, av1_palette_size_tree,
+ av1_default_palette_y_size_prob[bsize - BLOCK_8X8]) +
2;
n = pmi->palette_size[0];
for (i = 0; i < n; ++i)
- pmi->palette_colors[i] = vp10_read_literal(r, cm->bit_depth);
+ pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth);
xd->plane[0].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[0].color_index_map[0] < n);
@@ -475,18 +474,18 @@
}
if (mbmi->uv_mode == DC_PRED) {
- if (vp10_read(
- r, vp10_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
+ if (aom_read(r,
+ av1_default_palette_uv_mode_prob[pmi->palette_size[0] > 0])) {
pmi->palette_size[1] =
- vp10_read_tree(r, vp10_palette_size_tree,
- vp10_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
+ aom_read_tree(r, av1_palette_size_tree,
+ av1_default_palette_uv_size_prob[bsize - BLOCK_8X8]) +
2;
n = pmi->palette_size[1];
for (i = 0; i < n; ++i) {
pmi->palette_colors[PALETTE_MAX_SIZE + i] =
- vp10_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth);
pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] =
- vp10_read_literal(r, cm->bit_depth);
+ aom_read_literal(r, cm->bit_depth);
}
xd->plane[1].color_index_map[0] = read_uniform(r, n);
assert(xd->plane[1].color_index_map[0] < n);
@@ -495,8 +494,8 @@
}
#if CONFIG_EXT_INTRA
-static void read_ext_intra_mode_info(VP10_COMMON *const cm,
- MACROBLOCKD *const xd, vp10_reader *r) {
+static void read_ext_intra_mode_info(AV1_COMMON *const cm,
+ MACROBLOCKD *const xd, aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
FRAME_COUNTS *counts = xd->counts;
@@ -506,7 +505,7 @@
#endif
if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[0] =
- vp10_read(r, cm->fc->ext_intra_probs[0]);
+ aom_read(r, cm->fc->ext_intra_probs[0]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[0]) {
mbmi->ext_intra_mode_info.ext_intra_mode[0] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -517,7 +516,7 @@
if (mbmi->uv_mode == DC_PRED &&
mbmi->palette_mode_info.palette_size[1] == 0) {
mbmi->ext_intra_mode_info.use_ext_intra_mode[1] =
- vp10_read(r, cm->fc->ext_intra_probs[1]);
+ aom_read(r, cm->fc->ext_intra_probs[1]);
if (mbmi->ext_intra_mode_info.use_ext_intra_mode[1]) {
mbmi->ext_intra_mode_info.ext_intra_mode[1] =
read_uniform(r, FILTER_INTRA_MODES);
@@ -527,11 +526,11 @@
}
}
-static void read_intra_angle_info(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r) {
+static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- const int ctx = vp10_get_pred_context_intra_interp(xd);
+ const int ctx = av1_get_pred_context_intra_interp(xd);
int p_angle;
if (bsize < BLOCK_8X8) return;
@@ -540,10 +539,10 @@
mbmi->angle_delta[0] =
read_uniform(r, 2 * MAX_ANGLE_DELTAS + 1) - MAX_ANGLE_DELTAS;
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
- if (vp10_is_intra_filter_switchable(p_angle)) {
+ if (av1_is_intra_filter_switchable(p_angle)) {
FRAME_COUNTS *counts = xd->counts;
- mbmi->intra_filter = vp10_read_tree(r, vp10_intra_filter_tree,
- cm->fc->intra_filter_probs[ctx]);
+ mbmi->intra_filter = aom_read_tree(r, av1_intra_filter_tree,
+ cm->fc->intra_filter_probs[ctx]);
if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
} else {
mbmi->intra_filter = INTRA_FILTER_LINEAR;
@@ -557,9 +556,9 @@
}
#endif // CONFIG_EXT_INTRA
-static void read_intra_frame_mode_info(VP10_COMMON *const cm,
+static void read_intra_frame_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, int mi_row,
- int mi_col, vp10_reader *r) {
+ int mi_col, aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const MODE_INFO *above_mi = xd->above_mi;
@@ -571,8 +570,8 @@
const int bh = xd->plane[0].n4_h >> 1;
// TODO(slavarnway): move x_mis, y_mis into xd ?????
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw);
- const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh);
+ const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
+ const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
@@ -627,8 +626,8 @@
FRAME_COUNTS *counts = xd->counts;
int eset = get_ext_tx_set(mbmi->tx_size, mbmi->sb_type, 0);
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_intra_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
++counts
@@ -643,8 +642,8 @@
FRAME_COUNTS *counts = xd->counts;
TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
mbmi->tx_type =
- vp10_read_tree(r, vp10_ext_tx_tree,
- cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
+ aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
} else {
@@ -654,64 +653,63 @@
}
}
-static int read_mv_component(vp10_reader *r, const nmv_component *mvcomp,
+static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
int mag, d, fr, hp;
- const int sign = vp10_read(r, mvcomp->sign);
- const int mv_class = vp10_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
+ const int sign = aom_read(r, mvcomp->sign);
+ const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
const int class0 = mv_class == MV_CLASS_0;
// Integer part
if (class0) {
- d = vp10_read_tree(r, vp10_mv_class0_tree, mvcomp->class0);
+ d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
mag = 0;
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
d = 0;
- for (i = 0; i < n; ++i) d |= vp10_read(r, mvcomp->bits[i]) << i;
+ for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i]) << i;
mag = CLASS0_SIZE << (mv_class + 2);
}
// Fractional part
- fr = vp10_read_tree(r, vp10_mv_fp_tree,
- class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
+ fr = aom_read_tree(r, av1_mv_fp_tree,
+ class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
// High-precision part (if hp is not used, it defaults to 1)
- hp = usehp ? vp10_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
+ hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) : 1;
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
return sign ? -mag : mag;
}
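
The magnitude assembled above packs the class offset, the integer bits d, the fractional part fr, and the high-precision bit hp into eighth-pel units. A worked sketch with hypothetical decoded symbols, assuming CLASS0_BITS == 1 (so CLASS0_SIZE == 2) as in libaom's entropymv definitions:

    /* Worked example of read_mv_component()'s reconstruction, using
     * hypothetical symbol values. CLASS0_BITS == 1 and CLASS0_SIZE == 2
     * are assumed from libaom's entropymv tables, not shown in this diff. */
    enum { CLASS0_BITS = 1, CLASS0_SIZE = 1 << CLASS0_BITS };

    int sketch_mv_component(void) {
      const int sign = 1, mv_class = 2; /* MV_CLASS_2, i.e. not class 0 */
      const int d = 3, fr = 2, hp = 1;  /* 2 integer bits, fraction, hp */
      int mag = CLASS0_SIZE << (mv_class + 2); /* 2 << 4 == 32          */
      mag += ((d << 3) | (fr << 1) | hp) + 1;  /* 32 + 30 == 62         */
      return sign ? -mag : mag;                /* -62 eighth-pels       */
    }

The low three bits of the packed value carry the sub-integer precision, which is why enabling use_hp costs only one extra binary decision per component.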
-static INLINE void read_mv(vp10_reader *r, MV *mv, const MV *ref,
+static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
MV_JOINT_TYPE joint_type;
- const int use_hp = allow_hp && vp10_use_mv_hp(ref);
+ const int use_hp = allow_hp && av1_use_mv_hp(ref);
MV diff = { 0, 0 };
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
- int is_zero_rmv = vp10_read(r, ctx->zero_rmv);
+ int is_zero_rmv = aom_read(r, ctx->zero_rmv);
if (is_zero_rmv) {
joint_type = MV_JOINT_ZERO;
} else {
joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
}
} else {
joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
}
#else
- joint_type =
- (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
+ joint_type = (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
@@ -724,19 +722,19 @@
if (mv_joint_horizontal(joint_type))
diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
- vp10_inc_mv(&diff, counts, use_hp);
+ av1_inc_mv(&diff, counts, use_hp);
mv->row = ref->row + diff.row;
mv->col = ref->col + diff.col;
}
-static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
+static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
const MACROBLOCKD *xd,
- vp10_reader *r) {
+ aom_reader *r) {
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
- const int ctx = vp10_get_reference_mode_context(cm, xd);
+ const int ctx = av1_get_reference_mode_context(cm, xd);
const REFERENCE_MODE mode =
- (REFERENCE_MODE)vp10_read(r, cm->fc->comp_inter_prob[ctx]);
+ (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
@@ -746,8 +744,8 @@
}
// Read the reference frame
-static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- vp10_reader *r, int segment_id,
+static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ aom_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
FRAME_CONTEXT *const fc = cm->fc;
FRAME_COUNTS *counts = xd->counts;
@@ -765,29 +763,29 @@
#else
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
#endif // CONFIG_EXT_REFS
- const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
- const int bit = vp10_read(r, fc->comp_ref_prob[ctx][0]);
+ const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
+ const int bit = aom_read(r, fc->comp_ref_prob[ctx][0]);
if (counts) ++counts->comp_ref[ctx][0][bit];
#if CONFIG_EXT_REFS
// Decode forward references.
if (!bit) {
- const int ctx1 = vp10_get_pred_context_comp_ref_p1(cm, xd);
- const int bit1 = vp10_read(r, fc->comp_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_comp_ref_p1(cm, xd);
+ const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1]);
if (counts) ++counts->comp_ref[ctx1][1][bit1];
ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
} else {
- const int ctx2 = vp10_get_pred_context_comp_ref_p2(cm, xd);
- const int bit2 = vp10_read(r, fc->comp_ref_prob[ctx2][2]);
+ const int ctx2 = av1_get_pred_context_comp_ref_p2(cm, xd);
+ const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2]);
if (counts) ++counts->comp_ref[ctx2][2][bit2];
ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
}
// Decode backward references.
{
- const int ctx_bwd = vp10_get_pred_context_comp_bwdref_p(cm, xd);
- const int bit_bwd = vp10_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
+ const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
+ const int bit_bwd = aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0]);
if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
}
@@ -797,39 +795,39 @@
#endif // CONFIG_EXT_REFS
} else if (mode == SINGLE_REFERENCE) {
#if CONFIG_EXT_REFS
- const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
- const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+ const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
- const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
- const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
} else {
- const int ctx2 = vp10_get_pred_context_single_ref_p3(xd);
- const int bit2 = vp10_read(r, fc->single_ref_prob[ctx2][2]);
+ const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
+ const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2]);
if (counts) ++counts->single_ref[ctx2][2][bit2];
if (bit2) {
- const int ctx4 = vp10_get_pred_context_single_ref_p5(xd);
- const int bit4 = vp10_read(r, fc->single_ref_prob[ctx4][4]);
+ const int ctx4 = av1_get_pred_context_single_ref_p5(xd);
+ const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4]);
if (counts) ++counts->single_ref[ctx4][4][bit4];
ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
} else {
- const int ctx3 = vp10_get_pred_context_single_ref_p4(xd);
- const int bit3 = vp10_read(r, fc->single_ref_prob[ctx3][3]);
+ const int ctx3 = av1_get_pred_context_single_ref_p4(xd);
+ const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3]);
if (counts) ++counts->single_ref[ctx3][3][bit3];
ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
}
}
#else
- const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
- const int bit0 = vp10_read(r, fc->single_ref_prob[ctx0][0]);
+ const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
+ const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
- const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
- const int bit1 = vp10_read(r, fc->single_ref_prob[ctx1][1]);
+ const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
+ const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
@@ -845,16 +843,16 @@
}
#if CONFIG_OBMC || CONFIG_WARPED_MOTION
-static MOTION_VARIATION read_motvar_block(VP10_COMMON *const cm,
+static MOTION_VARIATION read_motvar_block(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
- vp10_reader *r) {
+ aom_reader *r) {
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
FRAME_COUNTS *counts = xd->counts;
MOTION_VARIATION motvar;
if (is_motvar_allowed(&xd->mi[0]->mbmi)) {
- motvar = (MOTION_VARIATION)vp10_read_tree(r, vp10_motvar_tree,
- cm->fc->motvar_prob[bsize]);
+ motvar = (MOTION_VARIATION)aom_read_tree(r, av1_motvar_tree,
+ cm->fc->motvar_prob[bsize]);
if (counts) ++counts->motvar[bsize][motvar];
return motvar;
} else {
@@ -863,34 +861,34 @@
}
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
-static INLINE INTERP_FILTER read_interp_filter(VP10_COMMON *const cm,
+static INLINE INTERP_FILTER read_interp_filter(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
#if CONFIG_DUAL_FILTER
int dir,
#endif
- vp10_reader *r) {
+ aom_reader *r) {
#if CONFIG_EXT_INTERP
- if (!vp10_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
+ if (!av1_is_interp_needed(xd)) return EIGHTTAP_REGULAR;
#endif
if (cm->interp_filter != SWITCHABLE) {
return cm->interp_filter;
} else {
#if CONFIG_DUAL_FILTER
- const int ctx = vp10_get_pred_context_switchable_interp(xd, dir);
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
#else
- const int ctx = vp10_get_pred_context_switchable_interp(xd);
+ const int ctx = av1_get_pred_context_switchable_interp(xd);
#endif
FRAME_COUNTS *counts = xd->counts;
- const INTERP_FILTER type = (INTERP_FILTER)vp10_read_tree(
- r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+ const INTERP_FILTER type = (INTERP_FILTER)aom_read_tree(
+ r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
if (counts) ++counts->switchable_interp[ctx][type];
return type;
}
}
-static void read_intra_block_mode_info(VP10_COMMON *const cm,
+static void read_intra_block_mode_info(AV1_COMMON *const cm,
MACROBLOCKD *const xd, MODE_INFO *mi,
- vp10_reader *r) {
+ aom_reader *r) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
int i;
@@ -938,14 +936,14 @@
mv->col < MV_UPP;
}
-static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
PREDICTION_MODE mode,
#if CONFIG_REF_MV
int block,
#endif
int_mv mv[2], int_mv ref_mv[2],
int_mv nearest_mv[2], int_mv near_mv[2],
- int is_compound, int allow_hp, vp10_reader *r) {
+ int is_compound, int allow_hp, aom_reader *r) {
int i;
int ret = 1;
#if CONFIG_REF_MV
@@ -966,8 +964,8 @@
#endif
for (i = 0; i < 1 + is_compound; ++i) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
- xd->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+ xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
@@ -1026,8 +1024,8 @@
assert(is_compound);
for (i = 0; i < 2; ++i) {
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
- xd->ref_mv_stack[mbmi->ref_frame[i]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+ xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, is_compound,
@@ -1067,8 +1065,8 @@
case NEW_NEARESTMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
- xd->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+ xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1086,8 +1084,8 @@
case NEAREST_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
- xd->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+ xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = nearest_mv[0].as_int;
@@ -1106,8 +1104,8 @@
case NEAR_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
- xd->ref_mv_stack[mbmi->ref_frame[1]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+ xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = near_mv[0].as_int;
@@ -1127,8 +1125,8 @@
case NEW_NEARMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
- int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
- xd->ref_mv_stack[mbmi->ref_frame[0]]);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+ xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
@@ -1155,13 +1153,13 @@
return ret;
}
-static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
- int segment_id, vp10_reader *r) {
+static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int segment_id, aom_reader *r) {
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
} else {
- const int ctx = vp10_get_intra_inter_context(xd);
- const int is_inter = vp10_read(r, cm->fc->intra_inter_prob[ctx]);
+ const int ctx = av1_get_intra_inter_context(xd);
+ const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
@@ -1169,21 +1167,21 @@
}
static void fpm_sync(void *const data, int mi_row) {
- VP10Decoder *const pbi = (VP10Decoder *)data;
- vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
- mi_row << pbi->common.mib_size_log2);
+ AV1Decoder *const pbi = (AV1Decoder *)data;
+ av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
+ mi_row << pbi->common.mib_size_log2);
}
-static void read_inter_block_mode_info(VP10Decoder *const pbi,
+static void read_inter_block_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
#if (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r,
+ int mi_row, int mi_col, aom_reader *r,
int supertx_enabled) {
#else
- int mi_row, int mi_col, vp10_reader *r) {
+ int mi_row, int mi_col, aom_reader *r) {
#endif // (CONFIG_OBMC || CONFIG_EXT_INTER) && CONFIG_SUPERTX
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
@@ -1211,22 +1209,22 @@
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
- if ((!vp10_is_valid_scale(&ref_buf->sf)))
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ if ((!av1_is_valid_scale(&ref_buf->sf)))
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
- vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+ av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
}
for (ref_frame = LAST_FRAME; ref_frame < MODE_CTX_REF_FRAMES; ++ref_frame) {
- vp10_find_mv_refs(cm, xd, mi, ref_frame,
+ av1_find_mv_refs(cm, xd, mi, ref_frame,
#if CONFIG_REF_MV
- &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
+ &xd->ref_mv_count[ref_frame], xd->ref_mv_stack[ref_frame],
#if CONFIG_EXT_INTER
- compound_inter_mode_ctx,
+ compound_inter_mode_ctx,
#endif // CONFIG_EXT_INTER
#endif
- ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
- inter_mode_ctx);
+ ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
+ inter_mode_ctx);
}
#if CONFIG_REF_MV
@@ -1236,7 +1234,7 @@
else
#endif // CONFIG_EXT_INTER
mode_ctx =
- vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
+ av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
mbmi->ref_mv_idx = 0;
#else
mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]];
@@ -1245,7 +1243,7 @@
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
- vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid usage of segment feature on small blocks");
return;
}
@@ -1275,8 +1273,8 @@
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref) {
- vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
- &nearestmv[ref], &nearmv[ref]);
+ av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+ &nearestmv[ref], &nearmv[ref]);
}
}
@@ -1293,7 +1291,7 @@
if (is_compound && bsize >= BLOCK_8X8 && mbmi->mode != NEWMV &&
mbmi->mode != ZEROMV) {
#endif // CONFIG_EXT_INTER
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 0) {
@@ -1366,8 +1364,8 @@
#if CONFIG_EXT_INTER
if (!is_compound)
#endif // CONFIG_EXT_INTER
- mode_ctx = vp10_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
- bsize, j);
+ mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
+ bsize, j);
#endif
#if CONFIG_EXT_INTER
if (is_compound)
@@ -1395,24 +1393,24 @@
#if CONFIG_EXT_INTER
{
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
- vp10_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
- mi_row, mi_col, NULL);
+ av1_update_mv_context(xd, mi, mbmi->ref_frame[ref], mv_ref_list, j,
+ mi_row, mi_col, NULL);
#endif // CONFIG_EXT_INTER
- vp10_append_sub8x8_mvs_for_idx(
- cm, xd, j, ref, mi_row, mi_col,
+ av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
#if CONFIG_REF_MV
- ref_mv_stack[ref], &ref_mv_count[ref],
+ ref_mv_stack[ref], &ref_mv_count[ref],
#endif
#if CONFIG_EXT_INTER
- mv_ref_list,
+ mv_ref_list,
#endif // CONFIG_EXT_INTER
- &nearest_sub8x8[ref], &near_sub8x8[ref]);
+ &nearest_sub8x8[ref],
+ &near_sub8x8[ref]);
#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(b_mode)) {
mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
- vp10_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
- &ref_mv[1][ref]);
+ av1_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
+ &ref_mv[1][ref]);
}
}
#endif // CONFIG_EXT_INTER
@@ -1469,7 +1467,7 @@
for (ref = 0; ref < 1 + is_compound && mbmi->mode == NEWMV; ++ref) {
#if CONFIG_REF_MV
- uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
if (xd->ref_mv_count[ref_frame_type] > 1) {
ref_mv[ref] =
(ref == 0)
@@ -1503,7 +1501,7 @@
#endif
is_interintra_allowed(mbmi)) {
const int bsize_group = size_group_lookup[bsize];
- const int interintra = vp10_read(r, cm->fc->interintra_prob[bsize_group]);
+ const int interintra = aom_read(r, cm->fc->interintra_prob[bsize_group]);
if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE);
if (interintra) {
@@ -1520,12 +1518,12 @@
#endif // CONFIG_EXT_INTRA
if (is_interintra_wedge_used(bsize)) {
mbmi->use_wedge_interintra =
- vp10_read(r, cm->fc->wedge_interintra_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interintra_prob[bsize]);
if (xd->counts)
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup(bsize));
+ aom_read_literal(r, get_wedge_bits_lookup(bsize));
mbmi->interintra_wedge_sign = 0;
}
}
@@ -1554,13 +1552,13 @@
#endif // CONFIG_OBMC || CONFIG_WARPED_MOTION
is_interinter_wedge_used(bsize)) {
mbmi->use_wedge_interinter =
- vp10_read(r, cm->fc->wedge_interinter_prob[bsize]);
+ aom_read(r, cm->fc->wedge_interinter_prob[bsize]);
if (xd->counts)
xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->interinter_wedge_index =
- vp10_read_literal(r, get_wedge_bits_lookup(bsize));
- mbmi->interinter_wedge_sign = vp10_read_bit(r);
+ aom_read_literal(r, get_wedge_bits_lookup(bsize));
+ mbmi->interinter_wedge_sign = aom_read_bit(r);
}
}
#endif // CONFIG_EXT_INTER
@@ -1588,13 +1586,13 @@
#endif // CONFIG_DUAL_FILTER
}
-static void read_inter_frame_mode_info(VP10Decoder *const pbi,
+static void read_inter_frame_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
#if CONFIG_SUPERTX
int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r) {
- VP10_COMMON *const cm = &pbi->common;
+ int mi_row, int mi_col, aom_reader *r) {
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block = 1;
@@ -1693,8 +1691,8 @@
if (inter_block) {
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_inter_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_inter_tree[eset],
cm->fc->inter_ext_tx_prob[eset][txsize_sqr_map[mbmi->tx_size]]);
if (counts)
++counts->inter_ext_tx[eset][txsize_sqr_map[mbmi->tx_size]]
@@ -1702,8 +1700,8 @@
}
} else if (ALLOW_INTRA_EXT_TX) {
if (eset > 0) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_intra_tree[eset],
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_intra_tree[eset],
cm->fc->intra_ext_tx_prob[eset][mbmi->tx_size][mbmi->mode]);
if (counts)
++counts->intra_ext_tx[eset][mbmi->tx_size][mbmi->mode]
@@ -1721,13 +1719,13 @@
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
+ mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
+ cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
- mbmi->tx_type = vp10_read_tree(
- r, vp10_ext_tx_tree,
+ mbmi->tx_type = aom_read_tree(
+ r, av1_ext_tx_tree,
cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
if (counts)
++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -1739,13 +1737,13 @@
}
}
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERTX
- int supertx_enabled,
+ int supertx_enabled,
#endif // CONFIG_SUPERTX
- int mi_row, int mi_col, vp10_reader *r, int x_mis,
- int y_mis) {
- VP10_COMMON *const cm = &pbi->common;
+ int mi_row, int mi_col, aom_reader *r, int x_mis,
+ int y_mis) {
+ AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
int w, h;
diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h
index 59fdd70..cf3d917 100644
--- a/av1/decoder/decodemv.h
+++ b/av1/decoder/decodemv.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODEMV_H_
-#define VP10_DECODER_DECODEMV_H_
+#ifndef AV1_DECODER_DECODEMV_H_
+#define AV1_DECODER_DECODEMV_H_
#include "av1/decoder/bitreader.h"
@@ -19,16 +19,16 @@
extern "C" {
#endif
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERTX
- int supertx_enabled,
+ int supertx_enabled,
#endif
- int mi_row, int mi_col, vp10_reader *r, int x_mis,
- int y_mis);
+ int mi_row, int mi_col, aom_reader *r, int x_mis,
+ int y_mis);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODEMV_H_
+#endif // AV1_DECODER_DECODEMV_H_
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 4cea36b..58952c0 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -12,16 +12,16 @@
#include <limits.h>
#include <stdio.h>
-#include "./vp10_rtcd.h"
-#include "./vpx_dsp_rtcd.h"
-#include "./vpx_scale_rtcd.h"
+#include "./av1_rtcd.h"
+#include "./aom_dsp_rtcd.h"
+#include "./aom_scale_rtcd.h"
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/system_state.h"
-#include "aom_ports/vpx_once.h"
-#include "aom_ports/vpx_timer.h"
-#include "aom_scale/vpx_scale.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_ports/aom_once.h"
+#include "aom_ports/aom_timer.h"
+#include "aom_scale/aom_scale.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/alloccommon.h"
#include "av1/common/loopfilter.h"
@@ -38,60 +38,60 @@
static volatile int init_done = 0;
if (!init_done) {
- vp10_rtcd();
- vpx_dsp_rtcd();
- vpx_scale_rtcd();
- vp10_init_intra_predictors();
+ av1_rtcd();
+ aom_dsp_rtcd();
+ aom_scale_rtcd();
+ av1_init_intra_predictors();
#if CONFIG_EXT_INTER
- vp10_init_wedge_masks();
+ av1_init_wedge_masks();
#endif // CONFIG_EXT_INTER
init_done = 1;
}
}
-static void vp10_dec_setup_mi(VP10_COMMON *cm) {
+static void av1_dec_setup_mi(AV1_COMMON *cm) {
cm->mi = cm->mip + cm->mi_stride + 1;
cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
memset(cm->mi_grid_base, 0,
cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}
-static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
- cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+static int av1_dec_alloc_mi(AV1_COMMON *cm, int mi_size) {
+ cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
if (!cm->mip) return 1;
cm->mi_alloc_size = mi_size;
- cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
+ cm->mi_grid_base = (MODE_INFO **)aom_calloc(mi_size, sizeof(MODE_INFO *));
if (!cm->mi_grid_base) return 1;
return 0;
}
-static void vp10_dec_free_mi(VP10_COMMON *cm) {
- vpx_free(cm->mip);
+static void av1_dec_free_mi(AV1_COMMON *cm) {
+ aom_free(cm->mip);
cm->mip = NULL;
- vpx_free(cm->mi_grid_base);
+ aom_free(cm->mi_grid_base);
cm->mi_grid_base = NULL;
}
-VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
- VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
- VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+AV1Decoder *av1_decoder_create(BufferPool *const pool) {
+ AV1Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
+ AV1_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm) return NULL;
- vp10_zero(*pbi);
+ av1_zero(*pbi);
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
- vp10_decoder_remove(pbi);
+ av1_decoder_remove(pbi);
return NULL;
}
cm->error.setjmp = 1;
- CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
CHECK_MEM_ERROR(
cm, cm->frame_contexts,
- (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
+ (FRAME_CONTEXT *)aom_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
once(initialize_dec);
@@ -104,50 +104,50 @@
pbi->ready_for_new_data = 1;
pbi->common.buffer_pool = pool;
- cm->bit_depth = VPX_BITS_8;
- cm->dequant_bit_depth = VPX_BITS_8;
+ cm->bit_depth = AOM_BITS_8;
+ cm->dequant_bit_depth = AOM_BITS_8;
- cm->alloc_mi = vp10_dec_alloc_mi;
- cm->free_mi = vp10_dec_free_mi;
- cm->setup_mi = vp10_dec_setup_mi;
+ cm->alloc_mi = av1_dec_alloc_mi;
+ cm->free_mi = av1_dec_free_mi;
+ cm->setup_mi = av1_dec_setup_mi;
- vp10_loop_filter_init(cm);
+ av1_loop_filter_init(cm);
#if CONFIG_AOM_QM
aom_qm_init(cm);
#endif
#if CONFIG_LOOP_RESTORATION
- vp10_loop_restoration_precal();
+ av1_loop_restoration_precal();
#endif // CONFIG_LOOP_RESTORATION
cm->error.setjmp = 0;
- vpx_get_worker_interface()->init(&pbi->lf_worker);
+ aom_get_worker_interface()->init(&pbi->lf_worker);
return pbi;
}
-void vp10_decoder_remove(VP10Decoder *pbi) {
+void av1_decoder_remove(AV1Decoder *pbi) {
int i;
if (!pbi) return;
- vpx_get_worker_interface()->end(&pbi->lf_worker);
- vpx_free(pbi->lf_worker.data1);
- vpx_free(pbi->tile_data);
+ aom_get_worker_interface()->end(&pbi->lf_worker);
+ aom_free(pbi->lf_worker.data1);
+ aom_free(pbi->tile_data);
for (i = 0; i < pbi->num_tile_workers; ++i) {
- VPxWorker *const worker = &pbi->tile_workers[i];
- vpx_get_worker_interface()->end(worker);
+ AVxWorker *const worker = &pbi->tile_workers[i];
+ aom_get_worker_interface()->end(worker);
}
- vpx_free(pbi->tile_worker_data);
- vpx_free(pbi->tile_worker_info);
- vpx_free(pbi->tile_workers);
+ aom_free(pbi->tile_worker_data);
+ aom_free(pbi->tile_worker_info);
+ aom_free(pbi->tile_workers);
if (pbi->num_tile_workers > 0) {
- vp10_loop_filter_dealloc(&pbi->lf_row_sync);
+ av1_loop_filter_dealloc(&pbi->lf_row_sync);
}
- vpx_free(pbi);
+ aom_free(pbi);
}
static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
@@ -156,45 +156,45 @@
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
-vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *cm = &pbi->common;
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
+ AV1_COMMON *cm = &pbi->common;
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
* encoder is using the frame buffers for. This is just a stub to keep the
- * vpxenc --test-decode functionality working, and will be replaced in a
- * later commit that adds VP9-specific controls for this functionality.
+ * aomenc --test-decode functionality working, and will be replaced in a
+ * later commit that adds AV1-specific controls for this functionality.
*/
- if (ref_frame_flag == VPX_LAST_FLAG) {
+ if (ref_frame_flag == AOM_LAST_FLAG) {
const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
if (cfg == NULL) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"No 'last' reference frame");
- return VPX_CODEC_ERROR;
+ return AOM_CODEC_ERROR;
}
if (!equal_dimensions(cfg, sd))
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Incorrect buffer dimensions");
else
- vpx_yv12_copy_frame(cfg, sd);
+ aom_yv12_copy_frame(cfg, sd);
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
}
return cm->error.error_code;
}
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd) {
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd) {
int idx;
YV12_BUFFER_CONFIG *ref_buf = NULL;
// TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
// encoder is using the frame buffers for. This is just a stub to keep the
- // vpxenc --test-decode functionality working, and will be replaced in a
- // later commit that adds VP9-specific controls for this functionality.
+ // aomenc --test-decode functionality working, and will be replaced in a
+ // later commit that adds AV1-specific controls for this functionality.
// (Yunqing) The set_reference control depends on the following setting in
// the encoder.
@@ -212,32 +212,32 @@
// TODO(zoeliu): Revisit the following code and reconsider what assumptions we
// may make about the reference frame buffer virtual indexes
- if (ref_frame_flag == VPX_LAST_FLAG) {
+ if (ref_frame_flag == AOM_LAST_FLAG) {
idx = cm->ref_frame_map[0];
#if CONFIG_EXT_REFS
- } else if (ref_frame_flag == VPX_LAST2_FLAG) {
+ } else if (ref_frame_flag == AOM_LAST2_FLAG) {
idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == VPX_LAST3_FLAG) {
+ } else if (ref_frame_flag == AOM_LAST3_FLAG) {
idx = cm->ref_frame_map[2];
- } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+ } else if (ref_frame_flag == AOM_GOLD_FLAG) {
idx = cm->ref_frame_map[3];
- } else if (ref_frame_flag == VPX_BWD_FLAG) {
+ } else if (ref_frame_flag == AOM_BWD_FLAG) {
idx = cm->ref_frame_map[4];
- } else if (ref_frame_flag == VPX_ALT_FLAG) {
+ } else if (ref_frame_flag == AOM_ALT_FLAG) {
idx = cm->ref_frame_map[5];
#else
- } else if (ref_frame_flag == VPX_GOLD_FLAG) {
+ } else if (ref_frame_flag == AOM_GOLD_FLAG) {
idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == VPX_ALT_FLAG) {
+ } else if (ref_frame_flag == AOM_ALT_FLAG) {
idx = cm->ref_frame_map[2];
#endif // CONFIG_EXT_REFS
} else {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
return cm->error.error_code;
}
if (idx < 0 || idx >= FRAME_BUFFERS) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Invalid reference frame map");
return cm->error.error_code;
}
@@ -246,20 +246,20 @@
ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;
if (!equal_dimensions(ref_buf, sd)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Incorrect buffer dimensions");
} else {
// Overwrite the reference frame buffer.
- vpx_yv12_copy_frame(sd, ref_buf);
+ aom_yv12_copy_frame(sd, ref_buf);
}
return cm->error.error_code;
}
/* If any buffer updating is signaled it should be done here. */
-static void swap_frame_buffers(VP10Decoder *pbi) {
+static void swap_frame_buffers(AV1Decoder *pbi) {
int ref_index = 0, mask;
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
@@ -302,14 +302,14 @@
}
}
-int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
- const uint8_t **psource) {
- VP10_COMMON *volatile const cm = &pbi->common;
+int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
+ const uint8_t **psource) {
+ AV1_COMMON *volatile const cm = &pbi->common;
BufferPool *volatile const pool = cm->buffer_pool;
RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
const uint8_t *source = *psource;
int retcode = 0;
- cm->error.error_code = VPX_CODEC_OK;
+ cm->error.error_code = AOM_CODEC_OK;
if (size == 0) {
// This is used to signal that we are missing frames.
@@ -340,27 +340,27 @@
// Find a free frame buffer. Return an error if none can be found.
cm->new_fb_idx = get_free_fb(cm);
- if (cm->new_fb_idx == INVALID_IDX) return VPX_CODEC_MEM_ERROR;
+ if (cm->new_fb_idx == INVALID_IDX) return AOM_CODEC_MEM_ERROR;
// Assign a MV array to the frame buffer.
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
pbi->hold_ref_buf = 0;
if (cm->frame_parallel_decode) {
- VPxWorker *const worker = pbi->frame_worker_owner;
- vp10_frameworker_lock_stats(worker);
+ AVxWorker *const worker = pbi->frame_worker_owner;
+ av1_frameworker_lock_stats(worker);
frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
// Reset decoding progress.
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
if (setjmp(cm->error.jmp)) {
- const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
+ const AVxWorkerInterface *const winterface = aom_get_worker_interface();
int i;
cm->error.setjmp = 0;
@@ -399,12 +399,12 @@
decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
unlock_buffer_pool(pool);
- vpx_clear_system_state();
+ aom_clear_system_state();
return -1;
}
cm->error.setjmp = 1;
- vp10_decode_frame(pbi, source, source + size, psource);
+ av1_decode_frame(pbi, source, source + size, psource);
swap_frame_buffers(pbi);
@@ -414,9 +414,9 @@
// border.
if (pbi->dec_tile_row == -1 && pbi->dec_tile_col == -1)
#endif // CONFIG_EXT_TILE
- vpx_extend_frame_inner_borders(cm->frame_to_show);
+ aom_extend_frame_inner_borders(cm->frame_to_show);
- vpx_clear_system_state();
+ aom_clear_system_state();
if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
@@ -428,24 +428,24 @@
cm->prev_frame = cm->cur_frame;
if (cm->seg.enabled && !cm->frame_parallel_decode)
- vp10_swap_current_and_last_seg_map(cm);
+ av1_swap_current_and_last_seg_map(cm);
}
// Update progress in frame parallel decode.
if (cm->frame_parallel_decode) {
// Need to lock the mutex here as another thread may
// be accessing this buffer.
- VPxWorker *const worker = pbi->frame_worker_owner;
+ AVxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
if (cm->show_frame) {
cm->current_video_frame++;
}
frame_worker_data->frame_decoded = 1;
frame_worker_data->frame_context_ready = 1;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
} else {
cm->last_width = cm->width;
cm->last_height = cm->height;
@@ -458,8 +458,8 @@
return retcode;
}
-int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
- VP10_COMMON *const cm = &pbi->common;
+int av1_get_raw_frame(AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
+ AV1_COMMON *const cm = &pbi->common;
int ret = -1;
if (pbi->ready_for_new_data == 1) return ret;
@@ -471,12 +471,12 @@
pbi->ready_for_new_data = 1;
*sd = *cm->frame_to_show;
ret = 0;
- vpx_clear_system_state();
+ aom_clear_system_state();
return ret;
}
-int vp10_get_frame_to_show(VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
- VP10_COMMON *const cm = &pbi->common;
+int av1_get_frame_to_show(AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame) {
+ AV1_COMMON *const cm = &pbi->common;
if (!cm->show_frame || !cm->frame_to_show) return -1;
@@ -484,10 +484,10 @@
return 0;
}
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ aom_decrypt_cb decrypt_cb,
+ void *decrypt_state) {
// A chunk ending with a byte matching 0xc0 is an invalid chunk unless
// it is a superframe index. If the last byte of real video compression
// data is 0xc0 the encoder must add a 0 byte. If we have the marker but
@@ -508,7 +508,7 @@
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
- if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;
+ if (data_sz < index_sz) return AOM_CODEC_CORRUPT_FRAME;
{
const uint8_t marker2 =
@@ -517,7 +517,7 @@
// This chunk is marked as having a superframe index but doesn't have
// the matching marker byte at the front of the index; therefore it's an
// invalid chunk.
- if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
+ if (marker != marker2) return AOM_CODEC_CORRUPT_FRAME;
}
{
@@ -545,5 +545,5 @@
*count = frames;
}
}
- return VPX_CODEC_OK;
+ return AOM_CODEC_OK;
}
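
The index that av1_parse_superframe_index() scans for keeps the VP9-era superframe layout: the final byte of the chunk is a marker whose top three bits are 0b110, bits 3..4 encode the per-frame size-field width minus one, and bits 0..2 encode the frame count minus one, with the whole index spanning 2 + mag * frames bytes bracketed by two copies of the marker. A hedged sketch of unpacking just the marker byte (the bit layout is assumed from the libvpx lineage; it is not restated in this hunk):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: unpack a superframe marker byte. The layout (0b110 prefix,
     * 2-bit magnitude, 3-bit frame count) is assumed from the VP9/libvpx
     * superframe format this decoder inherits. */
    static int sketch_parse_marker(uint8_t marker, int *frames,
                                   int *mag_bytes, size_t *index_sz) {
      if ((marker & 0xe0) != 0xc0) return 0;   /* not a superframe index */
      *frames = (marker & 0x7) + 1;            /* 1..8 frames            */
      *mag_bytes = ((marker >> 3) & 0x3) + 1;  /* 1..4 bytes per size    */
      /* leading marker + frames size fields + trailing marker */
      *index_sz = 2 + (size_t)(*mag_bytes) * (size_t)(*frames);
      return 1;
    }

This is also why the marker2 check above compares the first and last bytes of the candidate index: both must hold the identical marker byte before the chunk is treated as a superframe.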
diff --git a/av1/decoder/decoder.h b/av1/decoder/decoder.h
index 47a5a7b..b399768 100644
--- a/av1/decoder/decoder.h
+++ b/av1/decoder/decoder.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DECODER_H_
-#define VP10_DECODER_DECODER_H_
+#ifndef AV1_DECODER_DECODER_H_
+#define AV1_DECODER_DECODER_H_
-#include "./vpx_config.h"
+#include "./aom_config.h"
-#include "aom/vpx_codec.h"
+#include "aom/aom_codec.h"
#include "av1/decoder/bitreader.h"
#include "aom_scale/yv12config.h"
-#include "aom_util/vpx_thread.h"
+#include "aom_util/aom_thread.h"
#include "av1/common/thread_common.h"
#include "av1/common/onyxc_int.h"
@@ -28,8 +28,8 @@
// TODO(hkuang): combine this with TileWorkerData.
typedef struct TileData {
- VP10_COMMON *cm;
- vp10_reader bit_reader;
+ AV1_COMMON *cm;
+ aom_reader bit_reader;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff is shared by all the planes, so planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
@@ -37,14 +37,14 @@
} TileData;
typedef struct TileWorkerData {
- struct VP10Decoder *pbi;
- vp10_reader bit_reader;
+ struct AV1Decoder *pbi;
+ aom_reader bit_reader;
FRAME_COUNTS counts;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[MAX_TX_SQUARE]);
DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
- struct vpx_internal_error_info error_info;
+ struct aom_internal_error_info error_info;
} TileWorkerData;
typedef struct TileBufferDec {
@@ -55,10 +55,10 @@
int col; // only used with multi-threaded decoding
} TileBufferDec;
-typedef struct VP10Decoder {
+typedef struct AV1Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
- DECLARE_ALIGNED(16, VP10_COMMON, common);
+ DECLARE_ALIGNED(16, AV1_COMMON, common);
int ready_for_new_data;
@@ -68,9 +68,9 @@
// the same.
RefCntBuffer *cur_buf; // Current decoding frame buffer.
- VPxWorker *frame_worker_owner; // frame_worker that owns this pbi.
- VPxWorker lf_worker;
- VPxWorker *tile_workers;
+ AVxWorker *frame_worker_owner; // frame_worker that owns this pbi.
+ AVxWorker lf_worker;
+ AVxWorker *tile_workers;
TileWorkerData *tile_worker_data;
TileInfo *tile_worker_info;
int num_tile_workers;
@@ -80,9 +80,9 @@
TileBufferDec tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS];
- VP10LfSync lf_row_sync;
+ AV1LfSync lf_row_sync;
- vpx_decrypt_cb decrypt_cb;
+ aom_decrypt_cb decrypt_cb;
void *decrypt_state;
int max_threads;
@@ -95,24 +95,24 @@
int tile_col_size_bytes;
int dec_tile_row, dec_tile_col;
#endif // CONFIG_EXT_TILE
-} VP10Decoder;
+} AV1Decoder;
-int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
- const uint8_t **dest);
+int av1_receive_compressed_data(struct AV1Decoder *pbi, size_t size,
+ const uint8_t **dest);
-int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
+int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
-int vp10_get_frame_to_show(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *frame);
+int av1_get_frame_to_show(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame);
-vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
- VPX_REFFRAME ref_frame_flag,
- YV12_BUFFER_CONFIG *sd);
-
-vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
- VPX_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
+ AOM_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
+ AOM_REFFRAME ref_frame_flag,
+ YV12_BUFFER_CONFIG *sd);
+
+static INLINE uint8_t read_marker(aom_decrypt_cb decrypt_cb,
void *decrypt_state, const uint8_t *data) {
if (decrypt_cb) {
uint8_t marker;
@@ -124,14 +124,14 @@
// This function is exposed for use in tests, as well as the inlined function
// "read_marker".
-vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state);
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
+ uint32_t sizes[8], int *count,
+ aom_decrypt_cb decrypt_cb,
+ void *decrypt_state);
-struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
+struct AV1Decoder *av1_decoder_create(BufferPool *const pool);
-void vp10_decoder_remove(struct VP10Decoder *pbi);
+void av1_decoder_remove(struct AV1Decoder *pbi);
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
@@ -149,9 +149,9 @@
}
#if CONFIG_EXT_REFS
-static INLINE int dec_is_ref_frame_buf(VP10Decoder *const pbi,
+static INLINE int dec_is_ref_frame_buf(AV1Decoder *const pbi,
RefCntBuffer *frame_buf) {
- VP10_COMMON *const cm = &pbi->common;
+ AV1_COMMON *const cm = &pbi->common;
int i;
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
@@ -166,4 +166,4 @@
} // extern "C"
#endif
-#endif // VP10_DECODER_DECODER_H_
+#endif // AV1_DECODER_DECODER_H_
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index 0fba999..0935cdf 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "aom_mem/vpx_mem.h"
+#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "av1/common/ans.h"
@@ -37,9 +37,9 @@
} while (0)
#if !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *probs, int n, vp10_reader *r) {
+static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
int i, val = 0;
- for (i = 0; i < n; ++i) val = (val << 1) | vp10_read(r, probs[i]);
+ for (i = 0; i < n; ++i) val = (val << 1) | aom_read(r, probs[i]);
return val;
}
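
read_coeff() accumulates n binary decisions MSB-first, and decode_coefs() uses it to extend the category tokens: a CATEGORYk token contributes its category minimum plus the extra bits read from that category's probability table. A sketch of assembling a CATEGORY6 magnitude at 8-bit depth, assuming CAT6_MIN_VAL == 67 from the libvpx-derived token tables (the constant itself does not appear in this diff):

    /* Sketch: how decode_coefs() builds a CATEGORY6 value at AOM_BITS_8.
     * CAT6_MIN_VAL == 67 is assumed from the libvpx token tables; skip_bits
     * trims the probability table for smaller transform sizes. */
    static int sketch_cat6_value(aom_reader *r, const aom_prob *cat6p,
                                 int skip_bits) {
      int i, extra = 0;
      const int n = 14 - skip_bits;  /* 14 extra bits at 8-bit depth */
      for (i = 0; i < n; ++i)        /* same MSB-first loop as read_coeff() */
        extra = (extra << 1) | aom_read(r, cat6p[i]);
      return 67 /* CAT6_MIN_VAL */ + extra;
    }

At 10- and 12-bit depths the loop widens to 16 and 18 bits, matching the switch on xd->bd later in this file.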
@@ -47,7 +47,7 @@
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq, int ctx, const int16_t *scan,
- const int16_t *nb, vp10_reader *r,
+ const int16_t *nb, aom_reader *r,
const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
@@ -57,7 +57,7 @@
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
int ctx, const int16_t *scan, const int16_t *nb,
- vp10_reader *r)
+ aom_reader *r)
#endif
{
FRAME_COUNTS *counts = xd->counts;
@@ -69,9 +69,9 @@
#endif
int band, c = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
- const vpx_prob *prob;
+ const aom_prob *prob;
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[MAX_TX_SQUARE];
@@ -94,38 +94,38 @@
eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
}
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->bd > VPX_BITS_8) {
- if (xd->bd == VPX_BITS_10) {
- cat1_prob = vp10_cat1_prob_high10;
- cat2_prob = vp10_cat2_prob_high10;
- cat3_prob = vp10_cat3_prob_high10;
- cat4_prob = vp10_cat4_prob_high10;
- cat5_prob = vp10_cat5_prob_high10;
- cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (xd->bd > AOM_BITS_8) {
+ if (xd->bd == AOM_BITS_10) {
+ cat1_prob = av1_cat1_prob_high10;
+ cat2_prob = av1_cat2_prob_high10;
+ cat3_prob = av1_cat3_prob_high10;
+ cat4_prob = av1_cat4_prob_high10;
+ cat5_prob = av1_cat5_prob_high10;
+ cat6_prob = av1_cat6_prob_high10;
} else {
- cat1_prob = vp10_cat1_prob_high12;
- cat2_prob = vp10_cat2_prob_high12;
- cat3_prob = vp10_cat3_prob_high12;
- cat4_prob = vp10_cat4_prob_high12;
- cat5_prob = vp10_cat5_prob_high12;
- cat6_prob = vp10_cat6_prob_high12;
+ cat1_prob = av1_cat1_prob_high12;
+ cat2_prob = av1_cat2_prob_high12;
+ cat3_prob = av1_cat3_prob_high12;
+ cat4_prob = av1_cat4_prob_high12;
+ cat5_prob = av1_cat5_prob_high12;
+ cat6_prob = av1_cat6_prob_high12;
}
} else {
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
}
#else
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
#endif
dq_shift = get_tx_scale(xd, tx_type, tx_size);
@@ -135,7 +135,7 @@
band = *band_translate++;
prob = coef_probs[band][ctx];
if (counts) ++eob_branch_count[band][ctx];
- if (!vp10_read(r, prob[EOB_CONTEXT_NODE])) {
+ if (!aom_read(r, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
}
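
The hunk above is the top of the per-coefficient decision ladder: an EOB node first, then a run of ZERO nodes, then (in the following hunks) the ONE node and the Pareto tree for larger tokens. A compressed, self-contained sketch of that ladder, with read1() as a hypothetical stand-in for aom_read() against the per-node probabilities:

    #include <stdio.h>

    /* Hypothetical binary-symbol source standing in for aom_read(); a real
     * decoder reads each bit against a context-dependent node probability. */
    static const int *sym;
    static int read1(void) { return *sym++; }

    /* Compressed view of the ladder above: EOB node first, then a run of
     * ZERO nodes, then the ONE node, then the tree for larger tokens. */
    static const char *next_token(void) {
      if (!read1()) return "EOB";
      while (!read1()) { /* ZERO_TOKEN: skip this position, read again */ }
      if (!read1()) return "ONE_TOKEN";
      return "TWO_TOKEN_OR_LARGER"; /* refined by av1_coef_con_tree */
    }

    int main(void) {
      static const int bits[] = { 1, 0, 0, 1, 1 }; /* not-EOB, 2 zeros, >1 */
      sym = bits;
      puts(next_token()); /* prints TWO_TOKEN_OR_LARGER */
      return 0;
    }
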
@@ -144,7 +144,7 @@
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
- while (!vp10_read(r, prob[ZERO_CONTEXT_NODE])) {
+ while (!aom_read(r, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
dqv = dq[1];
token_cache[scan[c]] = 0;
@@ -158,14 +158,14 @@
#endif // CONFIG_NEW_QUANT
}
- if (!vp10_read(r, prob[ONE_CONTEXT_NODE])) {
+ if (!aom_read(r, prob[ONE_CONTEXT_NODE])) {
INCREMENT_COUNT(ONE_TOKEN);
token = ONE_TOKEN;
val = 1;
} else {
INCREMENT_COUNT(TWO_TOKEN);
- token = vp10_read_tree(r, vp10_coef_con_tree,
- vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+ token = aom_read_tree(r, av1_coef_con_tree,
+ av1_pareto8_full[prob[PIVOT_NODE] - 1]);
switch (token) {
case TWO_TOKEN:
case THREE_TOKEN:
@@ -188,15 +188,15 @@
case CATEGORY6_TOKEN: {
const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (xd->bd) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r);
break;
default: assert(0); return -1;
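
The three arms of this switch differ only in the CAT6 magnitude width, which grows by two bits per bit-depth step. A small sketch of that relationship, assuming bd takes the values 8, 10 and 12 behind AOM_BITS_8/10/12:

    #include <assert.h>
    #include <stdio.h>

    /* Sketch only: CAT6 magnitude bits before skip_bits is subtracted,
     * assuming bd is one of 8, 10, 12 as in the switch above. */
    static int cat6_bits(int bd) {
      assert(bd == 8 || bd == 10 || bd == 12);
      return 14 + (bd - 8); /* 8 -> 14, 10 -> 16, 12 -> 18 */
    }

    int main(void) {
      printf("%d %d %d\n", cat6_bits(8), cat6_bits(10), cat6_bits(12));
      return 0;
    }
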
@@ -210,7 +210,7 @@
}
#if CONFIG_NEW_QUANT
- v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
#if CONFIG_AOM_QM
@@ -221,15 +221,15 @@
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
- dqcoeff[scan[c]] = highbd_check_range((vp10_read_bit(r) ? -v : v), xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+ dqcoeff[scan[c]] = highbd_check_range((aom_read_bit(r) ? -v : v), xd->bd);
#else
- dqcoeff[scan[c]] = check_range(vp10_read_bit(r) ? -v : v);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+ dqcoeff[scan[c]] = check_range(aom_read_bit(r) ? -v : v);
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
- dqcoeff[scan[c]] = vp10_read_bit(r) ? -v : v;
+ dqcoeff[scan[c]] = aom_read_bit(r) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
dqv = dq[1];
@@ -238,7 +238,7 @@
return c;
}
#else // !CONFIG_ANS
-static INLINE int read_coeff(const vpx_prob *const probs, int n,
+static INLINE int read_coeff(const aom_prob *const probs, int n,
struct AnsDecoder *const ans) {
int i, val = 0;
for (i = 0; i < n; ++i) val = (val << 1) | uabs_read(ans, probs[i]);
@@ -260,11 +260,11 @@
int band, c = 0;
int skip_eob = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
- const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
+ const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const rans_dec_lut(*coef_cdfs)[COEFF_CONTEXTS] =
fc->coef_cdfs[tx_size_ctx][type][ref];
- const vpx_prob *prob;
+ const aom_prob *prob;
const rans_dec_lut *cdf;
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
@@ -290,38 +290,38 @@
eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref];
}
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->bd > VPX_BITS_8) {
- if (xd->bd == VPX_BITS_10) {
- cat1_prob = vp10_cat1_prob_high10;
- cat2_prob = vp10_cat2_prob_high10;
- cat3_prob = vp10_cat3_prob_high10;
- cat4_prob = vp10_cat4_prob_high10;
- cat5_prob = vp10_cat5_prob_high10;
- cat6_prob = vp10_cat6_prob_high10;
+#if CONFIG_AOM_HIGHBITDEPTH
+ if (xd->bd > AOM_BITS_8) {
+ if (xd->bd == AOM_BITS_10) {
+ cat1_prob = av1_cat1_prob_high10;
+ cat2_prob = av1_cat2_prob_high10;
+ cat3_prob = av1_cat3_prob_high10;
+ cat4_prob = av1_cat4_prob_high10;
+ cat5_prob = av1_cat5_prob_high10;
+ cat6_prob = av1_cat6_prob_high10;
} else {
- cat1_prob = vp10_cat1_prob_high12;
- cat2_prob = vp10_cat2_prob_high12;
- cat3_prob = vp10_cat3_prob_high12;
- cat4_prob = vp10_cat4_prob_high12;
- cat5_prob = vp10_cat5_prob_high12;
- cat6_prob = vp10_cat6_prob_high12;
+ cat1_prob = av1_cat1_prob_high12;
+ cat2_prob = av1_cat2_prob_high12;
+ cat3_prob = av1_cat3_prob_high12;
+ cat4_prob = av1_cat4_prob_high12;
+ cat5_prob = av1_cat5_prob_high12;
+ cat6_prob = av1_cat6_prob_high12;
}
} else {
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
}
#else
- cat1_prob = vp10_cat1_prob;
- cat2_prob = vp10_cat2_prob;
- cat3_prob = vp10_cat3_prob;
- cat4_prob = vp10_cat4_prob;
- cat5_prob = vp10_cat5_prob;
- cat6_prob = vp10_cat6_prob;
+ cat1_prob = av1_cat1_prob;
+ cat2_prob = av1_cat2_prob;
+ cat3_prob = av1_cat3_prob;
+ cat4_prob = av1_cat4_prob;
+ cat5_prob = av1_cat5_prob;
+ cat6_prob = av1_cat6_prob;
#endif
while (c < max_eob) {
@@ -371,15 +371,15 @@
case CATEGORY6_TOKEN: {
const int skip_bits = TX_SIZES - 1 - txsize_sqr_up_map[tx_size];
const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
switch (xd->bd) {
- case VPX_BITS_8:
+ case AOM_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
break;
- case VPX_BITS_10:
+ case AOM_BITS_10:
val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
break;
- case VPX_BITS_12:
+ case AOM_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
break;
default: assert(0); return -1;
@@ -390,23 +390,23 @@
} break;
}
#if CONFIG_NEW_QUANT
- v = vp10_dequant_abscoeff_nuq(val, dqv, dqv_val);
+ v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
v = dq_shift ? ROUND_POWER_OF_TWO(v, dq_shift) : v;
#else
v = (val * dqv) >> dq_shift;
#endif // CONFIG_NEW_QUANT
#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dqcoeff[scan[c]] =
highbd_check_range((uabs_read_bit(ans) ? -v : v), xd->bd);
#else
dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
-#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // CONFIG_AOM_HIGHBITDEPTH
#else
dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
- token_cache[scan[c]] = vp10_pt_energy_class[token];
+ token_cache[scan[c]] = av1_pt_energy_class[token];
skip_eob = 0;
}
++c;
@@ -418,8 +418,8 @@
}
#endif // !CONFIG_ANS
-// TODO(slavarnway): Decode version of vp10_set_context. Modify
-// vp10_set_context
+// TODO(slavarnway): Decode-side version of av1_set_context. Modify
+// av1_set_context
// after testing is complete, then delete this version.
static void dec_set_contexts(const MACROBLOCKD *xd,
struct macroblockd_plane *pd, TX_SIZE tx_size,
@@ -459,8 +459,8 @@
}
}
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
- vp10_reader *r) {
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
+ aom_reader *r) {
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -472,31 +472,31 @@
int n = mbmi->palette_mode_info.palette_size[plane != 0];
int i, j;
uint8_t *color_map = xd->plane[plane != 0].color_index_map;
- const vpx_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
- plane ? vp10_default_palette_uv_color_prob
- : vp10_default_palette_y_color_prob;
+ const aom_prob (*const prob)[PALETTE_COLOR_CONTEXTS][PALETTE_COLORS - 1] =
+ plane ? av1_default_palette_uv_color_prob
+ : av1_default_palette_y_color_prob;
for (i = 0; i < rows; ++i) {
for (j = (i == 0 ? 1 : 0); j < cols; ++j) {
color_ctx =
- vp10_get_palette_color_context(color_map, cols, i, j, n, color_order);
- color_idx = vp10_read_tree(r, vp10_palette_color_tree[n - 2],
- prob[n - 2][color_ctx]);
+ av1_get_palette_color_context(color_map, cols, i, j, n, color_order);
+ color_idx = aom_read_tree(r, av1_palette_color_tree[n - 2],
+ prob[n - 2][color_ctx]);
assert(color_idx >= 0 && color_idx < n);
color_map[i * cols + j] = color_order[color_idx];
}
}
}
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
- const scan_order *sc, int x, int y,
- TX_SIZE tx_size, TX_TYPE tx_type,
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ TX_TYPE tx_type,
#if CONFIG_ANS
- struct AnsDecoder *const r,
+ struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
- int seg_id) {
+ int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const int16_t *const dequant = pd->seg_dequant[seg_id];
const int ctx =
@@ -528,7 +528,7 @@
#endif // !CONFIG_ANS
dec_set_contexts(xd, pd, tx_size, eob > 0, x, y);
/*
- vp10_set_contexts(xd, pd,
+ av1_set_contexts(xd, pd,
get_plane_block_size(xd->mi[0]->mbmi.sb_type, pd),
tx_size, eob > 0, x, y);
*/
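
For orientation, av1_decode_palette_tokens above walks the color-index map in raster order, starting at (0, 1) because the loop bound j = (i == 0 ? 1 : 0) skips position (0, 0), and reads each index through av1_palette_color_tree with a neighborhood-dependent context. A simplified raster walk under those assumptions; read_index() is a dummy stand-in for the contexted tree read, and (0, 0) is assumed to be signaled elsewhere:

    #include <stdint.h>
    #include <stdio.h>

    /* Dummy stand-in for the contexted tree read in the loop above. */
    static int read_index(int ctx) { return ctx & 1; }

    /* Sketch: fill a rows*cols index map in raster order; entry (0, 0) is
     * assumed to be coded separately, matching the loop bounds above. */
    static void fill_color_map(uint8_t *map, int rows, int cols) {
      int i, j;
      for (i = 0; i < rows; ++i)
        for (j = (i == 0 ? 1 : 0); j < cols; ++j)
          map[i * cols + j] = (uint8_t)read_index(i + j);
    }

    int main(void) {
      uint8_t map[4 * 4] = { 0 };
      fill_color_map(map, 4, 4);
      printf("%u\n", map[1]); /* first coded position, (0, 1) */
      return 0;
    }
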
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 279c193..959e374 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DETOKENIZE_H_
-#define VP10_DECODER_DETOKENIZE_H_
+#ifndef AV1_DECODER_DETOKENIZE_H_
+#define AV1_DECODER_DETOKENIZE_H_
#include "av1/decoder/decoder.h"
#include "av1/common/ans.h"
@@ -19,20 +19,19 @@
extern "C" {
#endif
-void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
- vp10_reader *r);
-int vp10_decode_block_tokens(MACROBLOCKD *const xd, int plane,
- const scan_order *sc, int x, int y,
- TX_SIZE tx_size, TX_TYPE tx_type,
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane, aom_reader *r);
+int av1_decode_block_tokens(MACROBLOCKD *const xd, int plane,
+ const scan_order *sc, int x, int y, TX_SIZE tx_size,
+ TX_TYPE tx_type,
#if CONFIG_ANS
- struct AnsDecoder *const r,
+ struct AnsDecoder *const r,
#else
- vp10_reader *r,
+ aom_reader *r,
#endif // CONFIG_ANS
- int seg_id);
+ int seg_id);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DETOKENIZE_H_
+#endif // AV1_DECODER_DETOKENIZE_H_
diff --git a/av1/decoder/dsubexp.c b/av1/decoder/dsubexp.c
index 146a1de..c0fee8d 100644
--- a/av1/decoder/dsubexp.c
+++ b/av1/decoder/dsubexp.c
@@ -20,11 +20,11 @@
return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
}
-static int decode_uniform(vp10_reader *r) {
+static int decode_uniform(aom_reader *r) {
const int l = 8;
const int m = (1 << l) - 190;
- const int v = vp10_read_literal(r, l - 1);
- return v < m ? v : (v << 1) - m + vp10_read_bit(r);
+ const int v = aom_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + aom_read_bit(r);
}
static int inv_remap_prob(int v, int m) {
@@ -57,24 +57,24 @@
}
}
-static int decode_term_subexp(vp10_reader *r) {
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 4);
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 4) + 16;
- if (!vp10_read_bit(r)) return vp10_read_literal(r, 5) + 32;
+static int decode_term_subexp(aom_reader *r) {
+ if (!aom_read_bit(r)) return aom_read_literal(r, 4);
+ if (!aom_read_bit(r)) return aom_read_literal(r, 4) + 16;
+ if (!aom_read_bit(r)) return aom_read_literal(r, 5) + 32;
return decode_uniform(r) + 64;
}
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p) {
- if (vp10_read(r, DIFF_UPDATE_PROB)) {
+void av1_diff_update_prob(aom_reader *r, aom_prob *p) {
+ if (aom_read(r, DIFF_UPDATE_PROB)) {
const int delp = decode_term_subexp(r);
- *p = (vpx_prob)inv_remap_prob(delp, *p);
+ *p = (aom_prob)inv_remap_prob(delp, *p);
}
}
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits) {
- if (vp10_read_bit(r)) {
- int s = vp10_read_bit(r);
- int x = vp10_read_literal(r, mag_bits) + 1;
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits) {
+ if (aom_read_bit(r)) {
+ int s = aom_read_bit(r);
+ int x = aom_read_literal(r, mag_bits) + 1;
return (s > 0 ? -x : x);
} else {
return 0;
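
decode_term_subexp above is a short escape ladder: values 0-15 cost 1+4 bits, 16-31 cost 2+4, 32-63 cost 3+5, and 64-253 fall through to decode_uniform (7 bits, plus one more at or above m = 66) offset by 64; av1_diff_update_prob then gates the whole read behind a single DIFF_UPDATE_PROB flag, so an unchanged probability costs one bit. A stand-alone sketch of the ladder, with take_bit()/take_literal() as hypothetical bit sources:

    #include <stdio.h>

    /* Hypothetical bit source for illustration; the real decoder pulls
     * these from the arithmetic coder. */
    static unsigned src, pos;
    static int take_bit(void) { return (src >> --pos) & 1; }
    static int take_literal(int n) {
      int v = 0;
      while (n--) v = (v << 1) | take_bit();
      return v;
    }

    /* Mirrors decode_uniform()/decode_term_subexp() above. */
    static int uniform190(void) {
      const int m = (1 << 8) - 190; /* = 66 */
      const int v = take_literal(7);
      return v < m ? v : (v << 1) - m + take_bit();
    }
    static int term_subexp(void) {
      if (!take_bit()) return take_literal(4);
      if (!take_bit()) return take_literal(4) + 16;
      if (!take_bit()) return take_literal(5) + 32;
      return uniform190() + 64;
    }

    int main(void) {
      src = 0x25; pos = 6; /* 100101: one escape bit, then literal 0101 */
      printf("%d\n", term_subexp()); /* 16 + 5 = 21 */
      return 0;
    }
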
diff --git a/av1/decoder/dsubexp.h b/av1/decoder/dsubexp.h
index b8980f7..8587395 100644
--- a/av1/decoder/dsubexp.h
+++ b/av1/decoder/dsubexp.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DSUBEXP_H_
-#define VP10_DECODER_DSUBEXP_H_
+#ifndef AV1_DECODER_DSUBEXP_H_
+#define AV1_DECODER_DSUBEXP_H_
#include "av1/decoder/bitreader.h"
@@ -17,7 +17,7 @@
extern "C" {
#endif
-void vp10_diff_update_prob(vp10_reader *r, vpx_prob *p);
+void av1_diff_update_prob(aom_reader *r, aom_prob *p);
#ifdef __cplusplus
} // extern "C"
@@ -27,5 +27,5 @@
// 2 * 2^mag_bits + 1, symmetric around 0, where one bit is used to
// indicate 0 or non-zero, mag_bits bits are used to indicate magnitude
// and 1 more bit for the sign if non-zero.
-int vp10_read_primitive_symmetric(vp10_reader *r, unsigned int mag_bits);
-#endif // VP10_DECODER_DSUBEXP_H_
+int aom_read_primitive_symmetric(aom_reader *r, unsigned int mag_bits);
+#endif // AV1_DECODER_DSUBEXP_H_
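
Concretely, aom_read_primitive_symmetric covers the 2 * 2^mag_bits + 1 values in [-2^mag_bits, 2^mag_bits]: zero costs a single bit, and any other value costs 1 + 1 + mag_bits bits (presence, sign, magnitude minus one). A hedged sketch of the matching writer side, with write_bit()/write_literal() as hypothetical mirrors of the reads (here they just print the bits):

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical bit sinks mirroring aom_read_bit()/aom_read_literal(). */
    static void write_bit(int b) { printf("%d", b); }
    static void write_literal(int v, int n) {
      while (n--) write_bit((v >> n) & 1);
    }

    /* Writer-side mirror of aom_read_primitive_symmetric(): emits a value
     * in [-(1 << mag_bits), 1 << mag_bits]. */
    static void write_primitive_symmetric(int v, unsigned mag_bits) {
      assert(v >= -(1 << mag_bits) && v <= (1 << mag_bits));
      if (v == 0) {
        write_bit(0); /* a single bit for zero */
      } else {
        write_bit(1);     /* non-zero */
        write_bit(v < 0); /* sign */
        write_literal((v < 0 ? -v : v) - 1, (int)mag_bits); /* magnitude-1 */
      }
    }

    int main(void) {
      write_primitive_symmetric(-3, 4); /* prints 110010 */
      printf("\n");
      return 0;
    }
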
diff --git a/av1/decoder/dthread.c b/av1/decoder/dthread.c
index d9a2ce1..6f6a934 100644
--- a/av1/decoder/dthread.c
+++ b/av1/decoder/dthread.c
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "./vpx_config.h"
-#include "aom_mem/vpx_mem.h"
+#include "./aom_config.h"
+#include "aom_mem/aom_mem.h"
#include "av1/common/reconinter.h"
#include "av1/decoder/dthread.h"
#include "av1/decoder/decoder.h"
@@ -17,7 +17,7 @@
// #define DEBUG_THREAD
// TODO(hkuang): Clean up all the #ifdefs in this file.
-void vp10_frameworker_lock_stats(VPxWorker *const worker) {
+void av1_frameworker_lock_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_lock(&worker_data->stats_mutex);
@@ -26,7 +26,7 @@
#endif
}
-void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
+void av1_frameworker_unlock_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_unlock(&worker_data->stats_mutex);
@@ -35,7 +35,7 @@
#endif
}
-void vp10_frameworker_signal_stats(VPxWorker *const worker) {
+void av1_frameworker_signal_stats(AVxWorker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
@@ -59,8 +59,8 @@
#endif
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row) {
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+ int row) {
#if CONFIG_MULTITHREAD
if (!ref_buf) return;
@@ -73,10 +73,10 @@
{
// Find the worker thread that owns the reference frame. If the reference
// frame has been fully decoded, it may not have an owner.
- VPxWorker *const ref_worker = ref_buf->frame_worker_owner;
+ AVxWorker *const ref_worker = ref_buf->frame_worker_owner;
FrameWorkerData *const ref_worker_data =
(FrameWorkerData *)ref_worker->data1;
- const VP10Decoder *const pbi = ref_worker_data->pbi;
+ const AV1Decoder *const pbi = ref_worker_data->pbi;
#ifdef DEBUG_THREAD
{
@@ -87,7 +87,7 @@
}
#endif
- vp10_frameworker_lock_stats(ref_worker);
+ av1_frameworker_lock_stats(ref_worker);
while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
ref_buf->buf.corrupted != 1) {
pthread_cond_wait(&ref_worker_data->stats_cond,
@@ -96,12 +96,12 @@
if (ref_buf->buf.corrupted == 1) {
FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
- vp10_frameworker_unlock_stats(ref_worker);
- vpx_internal_error(&worker_data->pbi->common.error,
- VPX_CODEC_CORRUPT_FRAME,
+ av1_frameworker_unlock_stats(ref_worker);
+ aom_internal_error(&worker_data->pbi->common.error,
+ AOM_CODEC_CORRUPT_FRAME,
"Worker %p failed to decode frame", worker);
}
- vp10_frameworker_unlock_stats(ref_worker);
+ av1_frameworker_unlock_stats(ref_worker);
}
#else
(void)worker;
@@ -111,9 +111,9 @@
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row) {
#if CONFIG_MULTITHREAD
- VPxWorker *worker = buf->frame_worker_owner;
+ AVxWorker *worker = buf->frame_worker_owner;
#ifdef DEBUG_THREAD
{
@@ -123,27 +123,27 @@
}
#endif
- vp10_frameworker_lock_stats(worker);
+ av1_frameworker_lock_stats(worker);
buf->row = row;
- vp10_frameworker_signal_stats(worker);
- vp10_frameworker_unlock_stats(worker);
+ av1_frameworker_signal_stats(worker);
+ av1_frameworker_unlock_stats(worker);
#else
(void)buf;
(void)row;
#endif // CONFIG_MULTITHREAD
}
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker) {
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+ AVxWorker *const src_worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
- VP10_COMMON *const src_cm = &src_worker_data->pbi->common;
- VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+ AV1_COMMON *const src_cm = &src_worker_data->pbi->common;
+ AV1_COMMON *const dst_cm = &dst_worker_data->pbi->common;
int i;
// Wait until source frame's context is ready.
- vp10_frameworker_lock_stats(src_worker);
+ av1_frameworker_lock_stats(src_worker);
while (!src_worker_data->frame_context_ready) {
pthread_cond_wait(&src_worker_data->stats_cond,
&src_worker_data->stats_mutex);
@@ -153,10 +153,10 @@
? src_cm->current_frame_seg_map
: src_cm->last_frame_seg_map;
dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
- vp10_frameworker_unlock_stats(src_worker);
+ av1_frameworker_unlock_stats(src_worker);
dst_cm->bit_depth = src_cm->bit_depth;
-#if CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
#endif
#if CONFIG_EXT_REFS
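
The wait/broadcast machinery renamed above is a standard mutex-plus-condition-variable progress monitor: the consumer sleeps while ref_buf->row is behind the row it needs, and the producer bumps buf->row and wakes the sleepers under the same lock. A minimal pthreads sketch of the pattern; row_progress and its fields are illustrative, not the libaom types, and the real loop additionally re-checks buffer ownership and corruption:

    #include <pthread.h>

    /* Illustrative progress monitor; not the libaom types. */
    typedef struct {
      pthread_mutex_t mu;
      pthread_cond_t cv;
      int row; /* last fully decoded row */
    } row_progress;

    /* Consumer side, as in av1_frameworker_wait(): sleep until 'row' done. */
    static void wait_for_row(row_progress *p, int row) {
      pthread_mutex_lock(&p->mu);
      while (p->row < row) pthread_cond_wait(&p->cv, &p->mu);
      pthread_mutex_unlock(&p->mu);
    }

    /* Producer side, as in av1_frameworker_broadcast(): publish progress. */
    static void publish_row(row_progress *p, int row) {
      pthread_mutex_lock(&p->mu);
      p->row = row;
      pthread_cond_broadcast(&p->cv);
      pthread_mutex_unlock(&p->mu);
    }
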
diff --git a/av1/decoder/dthread.h b/av1/decoder/dthread.h
index ef548b6..84fb714 100644
--- a/av1/decoder/dthread.h
+++ b/av1/decoder/dthread.h
@@ -8,24 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef VP10_DECODER_DTHREAD_H_
-#define VP10_DECODER_DTHREAD_H_
+#ifndef AV1_DECODER_DTHREAD_H_
+#define AV1_DECODER_DTHREAD_H_
-#include "./vpx_config.h"
-#include "aom_util/vpx_thread.h"
-#include "aom/internal/vpx_codec_internal.h"
+#include "./aom_config.h"
+#include "aom_util/aom_thread.h"
+#include "aom/internal/aom_codec_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct VP10Common;
-struct VP10Decoder;
+struct AV1Common;
+struct AV1Decoder;
// WorkerData for the FrameWorker thread. It contains the worker's state and
// the decode structures needed to decode a frame.
typedef struct FrameWorkerData {
- struct VP10Decoder *pbi;
+ struct AV1Decoder *pbi;
const uint8_t *data;
const uint8_t *data_end;
size_t data_size;
@@ -48,27 +48,27 @@
int frame_decoded; // Finished decoding current frame.
} FrameWorkerData;
-void vp10_frameworker_lock_stats(VPxWorker *const worker);
-void vp10_frameworker_unlock_stats(VPxWorker *const worker);
-void vp10_frameworker_signal_stats(VPxWorker *const worker);
+void av1_frameworker_lock_stats(AVxWorker *const worker);
+void av1_frameworker_unlock_stats(AVxWorker *const worker);
+void av1_frameworker_signal_stats(AVxWorker *const worker);
// Wait until ref_buf has been decoded to row, in real pixel units.
// Note: the worker may have already finished decoding ref_buf and released it
// in order to start decoding the next frame, so callers must check whether the
// worker is still decoding ref_buf.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
- int row);
+void av1_frameworker_wait(AVxWorker *const worker, RefCntBuffer *const ref_buf,
+ int row);
// FrameWorker broadcasts its decoding progress so other workers that are
// waiting on it can resume decoding.
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row);
// Copy necessary decoding context from src worker to dst worker.
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
- VPxWorker *const src_worker);
+void av1_frameworker_copy_context(AVxWorker *const dst_worker,
+ AVxWorker *const src_worker);
#ifdef __cplusplus
} // extern "C"
#endif
-#endif // VP10_DECODER_DTHREAD_H_
+#endif // AV1_DECODER_DTHREAD_H_
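
Read together, the renamed API suggests the frame-parallel call shape: a worker decoding a frame that references another in-flight frame waits on the reference's progress and publishes its own as rows complete. A hypothetical call-site sketch that compiles against stand-in declarations only; decode_rows, decode_one_row and ref_lag are illustrative names, not from the tree:

    /* Opaque stand-ins so this sketch compiles in isolation. */
    struct AVxWorker;
    struct RefCntBuffer;
    void av1_frameworker_wait(struct AVxWorker *const worker,
                              struct RefCntBuffer *const ref_buf, int row);
    void av1_frameworker_broadcast(struct RefCntBuffer *const buf, int row);
    static void decode_one_row(struct RefCntBuffer *out, int row) {
      (void)out;
      (void)row; /* illustrative stub */
    }

    static void decode_rows(struct AVxWorker *self, struct RefCntBuffer *ref,
                            struct RefCntBuffer *out, int rows, int ref_lag) {
      int row;
      for (row = 0; row < rows; ++row) {
        /* Block until the reference frame is far enough ahead. */
        av1_frameworker_wait(self, ref, row + ref_lag);
        decode_one_row(out, row);
        /* Let workers waiting on this frame resume. */
        av1_frameworker_broadcast(out, row);
      }
    }
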